+ out_data[:] = data.ljust(len(out_data), b'\0')
+
def get_next_sample(self, frame_count):
    """Serve up to *frame_count* frames of raw audio bytes for playback.

    If an effect-processed segment (``self.a_s_with_effect``) is active, frames
    are drawn from it first; any remaining frames are then drawn from
    ``self.current_audio_segment``. Playback positions
    (``current_frame_with_effect`` and ``current_frame``) are advanced by the
    number of frames actually taken, and the effect segment is dropped once it
    can no longer satisfy the request on its own.

    NOTE(review): segments are assumed to expose ``frame_count()`` and a raw
    byte buffer ``._data`` (pydub-style AudioSegment internals) — confirm.

    :param frame_count: number of frames requested by the audio callback.
    :return: ``[data, nb_frames]`` — the concatenated raw bytes and the number
             of frames they contain (may be fewer than requested at end of
             segment).
    """
    frame_width = self.audio_segment_frame_width

    chunks = []
    frames_served = 0

    effect_segment = self.a_s_with_effect
    if effect_segment is not None:
        total = int(effect_segment.frame_count())

        begin = max(self.current_frame_with_effect, 0)
        stop = min(self.current_frame_with_effect + frame_count, total)

        chunks.append(effect_segment._data[(begin * frame_width):(stop * frame_width)])

        # Frames still owed after the effect segment runs out; computed from
        # the pre-advance position on purpose.
        frame_count = max(0, self.current_frame_with_effect + frame_count - total)

        taken = stop - begin
        self.current_frame_with_effect += taken
        self.current_frame += taken
        frames_served += taken

        if frame_count > 0:
            # Effect segment exhausted mid-request — fall back to the plain
            # segment from now on.
            self.a_s_with_effect = None

    segment = self.current_audio_segment
    total = int(segment.frame_count())

    begin = max(self.current_frame, 0)
    stop = min(self.current_frame + frame_count, total)

    chunks.append(segment._data[(begin * frame_width):(stop * frame_width)])
    frames_served += stop - begin
    self.current_frame += stop - begin

    return [b"".join(chunks), frames_served]