#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
-#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this theshold, increase the watermark */
-#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
+#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever below this threshold, increase the watermark */
+#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
-/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means tht we
+/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
* will increase the watermark only if we hit a real underrun. */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
snd_pcm_t *pcm_handle;
+ char *paths_dir;
pa_alsa_fdlist *mixer_fdl;
pa_alsa_mixer_pdata *mixer_pd;
snd_mixer_t *mixer_handle;
fragment_size,
hwbuf_size,
tsched_watermark,
+ tsched_watermark_ref,
hwbuf_unused,
min_sleep,
min_wakeup,
rewind_safeguard;
pa_usec_t watermark_dec_not_before;
+ pa_usec_t min_latency_ref;
pa_memchunk memchunk;
char *device_name; /* name of the PCM device */
char *control_device; /* name of the control device */
- pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;
+ pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;
pa_bool_t first, after_rewind;
pa_assert(u);
- /* Use the full buffer if noone asked us for anything specific */
+ /* Use the full buffer if no one asked us for anything specific */
u->hwbuf_unused = 0;
if (u->use_tsched) {
return 0;
}
+/* Called from IO context on unsuspend, or from the main thread when the
+ * sink is created. Recomputes the time-scheduling watermark plus its
+ * increment/decrement steps and thresholds (all in bytes of the sink's
+ * sample spec), then republishes the sink's latency range. */
+static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
+                            pa_bool_t in_thread)
+{
+    /* tsched_watermark is in bytes of *ss; convert via usec (rounding up)
+     * into bytes of the sink's possibly different sample spec. */
+    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
+                                                    &u->sink->sample_spec);
+
+    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
+    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
+
+    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
+    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
+
+    /* Clamp watermark/sleep/wakeup against the hardware buffer size. */
+    fix_min_sleep_wakeup(u);
+    fix_tsched_watermark(u);
+
+    if (in_thread)
+        pa_sink_set_latency_range_within_thread(u->sink,
+                                                u->min_latency_ref,
+                                                pa_bytes_to_usec(u->hwbuf_size, ss));
+    else {
+        pa_sink_set_latency_range(u->sink,
+                                  0,
+                                  pa_bytes_to_usec(u->hwbuf_size, ss));
+
+        /* work-around assert in pa_sink_set_latency_range_within_thread():
+           keep track of the min_latency it computed from the 0 we passed,
+           and reuse it when this routine is called from IO context */
+        u->min_latency_ref = u->sink->thread_info.min_latency;
+    }
+
+    pa_log_info("Time scheduling watermark is %0.2fms",
+                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
+}
+
/* Called from IO context */
static int unsuspend(struct userdata *u) {
pa_sample_spec ss;
pa_log_info("Trying resume...");
- if ((is_spdif(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
+ if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
/* Need to open device in NONAUDIO mode */
int len = strlen(u->device_name) + 8;
u->first = TRUE;
u->since_start = 0;
+ /* reset the watermark to the value defined when sink was created */
+ if (u->use_tsched)
+ reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, TRUE);
+
pa_log_info("Resumed successfully...");
pa_xfree(device_name);
switch (code) {
- case PA_SINK_MESSAGE_FINISH_MOVE:
- case PA_SINK_MESSAGE_ADD_INPUT: {
- pa_sink_input *i = PA_SINK_INPUT(data);
- int r = 0;
-
- if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
- break;
-
- u->old_rate = u->sink->sample_spec.rate;
-
- /* Passthrough format, see if we need to reset sink sample rate */
- if (u->sink->sample_spec.rate == i->thread_info.sample_spec.rate)
- break;
-
- /* .. we do */
- if ((r = suspend(u)) < 0)
- return r;
-
- u->sink->sample_spec.rate = i->thread_info.sample_spec.rate;
-
- if ((r = unsuspend(u)) < 0)
- return r;
-
- break;
- }
-
- case PA_SINK_MESSAGE_START_MOVE:
- case PA_SINK_MESSAGE_REMOVE_INPUT: {
- pa_sink_input *i = PA_SINK_INPUT(data);
- int r = 0;
-
- if (PA_LIKELY(!pa_sink_input_is_passthrough(i)))
- break;
-
- /* Passthrough format, see if we need to reset sink sample rate */
- if (u->sink->sample_spec.rate == u->old_rate)
- break;
-
- /* .. we do */
- if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = suspend(u)) < 0))
- return r;
-
- u->sink->sample_spec.rate = u->old_rate;
-
- if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((r = unsuspend(u)) < 0))
- return r;
-
- break;
- }
-
case PA_SINK_MESSAGE_GET_LATENCY: {
pa_usec_t r = 0;
if (mask == SND_CTL_EVENT_MASK_REMOVE)
return 0;
+ if (!PA_SINK_IS_LINKED(u->sink->state))
+ return 0;
+
if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
return 0;
struct userdata *u = s->userdata;
pa_cvolume r;
char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
- pa_bool_t sync_volume = !!(s->flags & PA_SINK_SYNC_VOLUME);
+ pa_bool_t deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
pa_assert(u);
pa_assert(u->mixer_path);
/* Shift up by the base volume */
pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
- if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
+ if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
return;
/* Shift down by the base volume, so that 0dB becomes maximum volume */
pa_assert(u);
pa_assert(u->mixer_path);
pa_assert(u->mixer_handle);
- pa_assert(s->flags & PA_SINK_SYNC_VOLUME);
+ pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
/* Shift up by the base volume */
pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
- if (u->mixer_path->has_dB && u->sync_volume) {
+ if (u->mixer_path->has_dB && u->deferred_volume) {
pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
pa_log_info("Successfully enabled synchronous volume.");
} else
pa_idxset_free(u->formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
u->formats = pa_idxset_new(NULL, NULL);
+ /* Note: the logic below won't apply if we're using software encoding.
+ * This is fine for now since we don't support that via the passthrough
+ * framework, but this must be changed if we do. */
+
+ /* First insert non-PCM formats since we prefer those. */
+ PA_IDXSET_FOREACH(f, formats, idx) {
+ if (!pa_format_info_is_pcm(f))
+ pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
+ }
+
+ /* Now add any PCM formats */
PA_IDXSET_FOREACH(f, formats, idx) {
- pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
+ if (pa_format_info_is_pcm(f))
+ pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
}
return TRUE;
}
+/* Called from the main thread to request a sample-rate change (used for the
+ * alternate sample rate feature). Only accepts the new rate while the sink
+ * is not opened, since the PCM device cannot be reconfigured while running.
+ * Returns TRUE if the rate was updated, FALSE otherwise. */
+static pa_bool_t sink_update_rate_cb(pa_sink *s, uint32_t rate)
+{
+    struct userdata *u = s->userdata;
+    pa_assert(u);
+
+    if (!PA_SINK_IS_OPENED(s->state)) {
+        pa_log_info("Updating rate for device %s, new rate is %d",u->device_name, rate);
+        /* The new rate takes effect the next time the PCM is opened
+         * (unsuspend reconfigures the device from the sink sample spec). */
+        u->sink->sample_spec.rate = rate;
+        return TRUE;
+    }
+    return FALSE;
+}
+
static int process_rewind(struct userdata *u) {
snd_pcm_sframes_t unused;
size_t rewind_nbytes, unused_nbytes, limit_nbytes;
}
- if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
+ if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
pa_usec_t volume_sleep;
pa_sink_volume_change_apply(u->sink, &volume_sleep);
- if (volume_sleep > 0)
- rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
+ if (volume_sleep > 0) {
+ if (rtpoll_sleep > 0)
+ rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
+ else
+ rtpoll_sleep = volume_sleep;
+ }
}
if (rtpoll_sleep > 0)
if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
goto fail;
- if (u->sink->flags & PA_SINK_SYNC_VOLUME)
+ if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
pa_sink_volume_change_apply(u->sink, NULL);
if (ret == 0)
pa_alsa_path_dump(u->mixer_path);
} else {
- if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
+ if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT, u->paths_dir)))
goto fail;
pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
if (need_mixer_callback) {
int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
- if (u->sink->flags & PA_SINK_SYNC_VOLUME) {
+ if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
u->mixer_pd = pa_alsa_mixer_pdata_new();
mixer_callback = io_mixer_callback;
struct userdata *u = NULL;
const char *dev_id = NULL;
- pa_sample_spec ss, requested_ss;
+ pa_sample_spec ss;
+ uint32_t alternate_sample_rate;
pa_channel_map map;
uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
size_t frame_size;
- pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE, set_formats = FALSE;
+ pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, set_formats = FALSE;
pa_sink_new_data data;
pa_alsa_profile_set *profile_set = NULL;
goto fail;
}
- requested_ss = ss;
+ alternate_sample_rate = m->core->alternate_sample_rate;
+ if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
+ pa_log("Failed to parse alternate sample rate");
+ goto fail;
+ }
+
frame_size = pa_frame_size(&ss);
nfrags = m->core->default_n_fragments;
goto fail;
}
- sync_volume = m->core->sync_volume;
- if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
- pa_log("Failed to parse sync_volume argument.");
+ deferred_volume = m->core->deferred_volume;
+ if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
+ pa_log("Failed to parse deferred_volume argument.");
goto fail;
}
u->module = m;
u->use_mmap = use_mmap;
u->use_tsched = use_tsched;
- u->sync_volume = sync_volume;
+ u->deferred_volume = deferred_volume;
u->first = TRUE;
u->rewind_safeguard = rewind_safeguard;
u->rtpoll = pa_rtpoll_new();
ma, "device_id",
pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
+ u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
+
if (reserve_init(u, dev_id) < 0)
goto fail;
* variable is impossible. */
namereg_fail = data.namereg_fail;
if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
- pa_log("Failed to parse boolean argument namereg_fail.");
+ pa_log("Failed to parse namereg_fail argument.");
pa_sink_new_data_done(&data);
goto fail;
}
pa_sink_new_data_set_sample_spec(&data, &ss);
pa_sink_new_data_set_channel_map(&data, &map);
+ pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
goto fail;
}
- if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
+ if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
&u->sink->thread_info.volume_change_safety_margin) < 0) {
- pa_log("Failed to parse sync_volume_safety_margin parameter");
+ pa_log("Failed to parse deferred_volume_safety_margin parameter");
goto fail;
}
- if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
+ if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
&u->sink->thread_info.volume_change_extra_delay) < 0) {
- pa_log("Failed to parse sync_volume_extra_delay parameter");
+ pa_log("Failed to parse deferred_volume_extra_delay parameter");
goto fail;
}
u->sink->update_requested_latency = sink_update_requested_latency_cb;
u->sink->set_state = sink_set_state_cb;
u->sink->set_port = sink_set_port_cb;
+ if (u->sink->alternate_sample_rate)
+ u->sink->update_rate = sink_update_rate_cb;
u->sink->userdata = u;
pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
}
if (u->use_tsched) {
- u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
-
- u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
- u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
-
- u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
- u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
-
- fix_min_sleep_wakeup(u);
- fix_tsched_watermark(u);
-
- pa_sink_set_latency_range(u->sink,
- 0,
- pa_bytes_to_usec(u->hwbuf_size, &ss));
-
- pa_log_info("Time scheduling watermark is %0.2fms",
- (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
+ u->tsched_watermark_ref = tsched_watermark;
+ reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
} else
pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
pa_xfree(u->device_name);
pa_xfree(u->control_device);
+ pa_xfree(u->paths_dir);
pa_xfree(u);
}