2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update inteval */
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
92 pa_thread_mq thread_mq
;
95 snd_pcm_t
*pcm_handle
;
97 pa_alsa_fdlist
*mixer_fdl
;
98 snd_mixer_t
*mixer_handle
;
99 pa_alsa_path_set
*mixer_path_set
;
100 pa_alsa_path
*mixer_path
;
102 pa_cvolume hardware_volume
;
114 watermark_inc_threshold
,
115 watermark_dec_threshold
;
117 pa_usec_t watermark_dec_not_before
;
120 pa_memchunk memchunk
;
122 char *device_name
; /* name of the PCM device */
123 char *control_device
; /* name of the control device */
125 pa_bool_t use_mmap
:1, use_tsched
:1;
127 pa_bool_t first
, after_rewind
;
129 pa_rtpoll_item
*alsa_rtpoll_item
;
131 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
133 pa_smoother
*smoother
;
134 uint64_t write_count
;
135 uint64_t since_start
;
136 pa_usec_t smoother_interval
;
137 pa_usec_t last_smoother_update
;
139 pa_reserve_wrapper
*reserve
;
140 pa_hook_slot
*reserve_slot
;
141 pa_reserve_monitor_wrapper
*monitor
;
142 pa_hook_slot
*monitor_slot
;
145 static void userdata_free(struct userdata
*u
);
147 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
151 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
152 return PA_HOOK_CANCEL
;
157 static void reserve_done(struct userdata
*u
) {
160 if (u
->reserve_slot
) {
161 pa_hook_slot_free(u
->reserve_slot
);
162 u
->reserve_slot
= NULL
;
166 pa_reserve_wrapper_unref(u
->reserve
);
171 static void reserve_update(struct userdata
*u
) {
172 const char *description
;
175 if (!u
->sink
|| !u
->reserve
)
178 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
179 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
182 static int reserve_init(struct userdata
*u
, const char *dname
) {
191 if (pa_in_system_mode())
194 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
197 /* We are resuming, try to lock the device */
198 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
206 pa_assert(!u
->reserve_slot
);
207 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
212 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
218 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
220 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
224 static void monitor_done(struct userdata
*u
) {
227 if (u
->monitor_slot
) {
228 pa_hook_slot_free(u
->monitor_slot
);
229 u
->monitor_slot
= NULL
;
233 pa_reserve_monitor_wrapper_unref(u
->monitor
);
238 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
244 if (pa_in_system_mode())
247 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
250 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
256 pa_assert(!u
->monitor_slot
);
257 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
262 static void fix_min_sleep_wakeup(struct userdata
*u
) {
263 size_t max_use
, max_use_2
;
266 pa_assert(u
->use_tsched
);
268 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
269 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
271 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
272 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
274 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
275 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
278 static void fix_tsched_watermark(struct userdata
*u
) {
281 pa_assert(u
->use_tsched
);
283 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
285 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
286 u
->tsched_watermark
= max_use
- u
->min_sleep
;
288 if (u
->tsched_watermark
< u
->min_wakeup
)
289 u
->tsched_watermark
= u
->min_wakeup
;
292 static void increase_watermark(struct userdata
*u
) {
293 size_t old_watermark
;
294 pa_usec_t old_min_latency
, new_min_latency
;
297 pa_assert(u
->use_tsched
);
299 /* First, just try to increase the watermark */
300 old_watermark
= u
->tsched_watermark
;
301 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
302 fix_tsched_watermark(u
);
304 if (old_watermark
!= u
->tsched_watermark
) {
305 pa_log_info("Increasing wakeup watermark to %0.2f ms",
306 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
310 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
311 old_min_latency
= u
->sink
->thread_info
.min_latency
;
312 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
313 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
315 if (old_min_latency
!= new_min_latency
) {
316 pa_log_info("Increasing minimal latency to %0.2f ms",
317 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
319 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
322 /* When we reach this we're officialy fucked! */
325 static void decrease_watermark(struct userdata
*u
) {
326 size_t old_watermark
;
330 pa_assert(u
->use_tsched
);
332 now
= pa_rtclock_now();
334 if (u
->watermark_dec_not_before
<= 0)
337 if (u
->watermark_dec_not_before
> now
)
340 old_watermark
= u
->tsched_watermark
;
342 if (u
->tsched_watermark
< u
->watermark_dec_step
)
343 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
345 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
347 fix_tsched_watermark(u
);
349 if (old_watermark
!= u
->tsched_watermark
)
350 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
351 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
353 /* We don't change the latency range*/
356 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
359 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
362 pa_assert(sleep_usec
);
363 pa_assert(process_usec
);
366 pa_assert(u
->use_tsched
);
368 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
370 if (usec
== (pa_usec_t
) -1)
371 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
373 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
378 *sleep_usec
= usec
- wm
;
382 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
383 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
384 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
385 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
389 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
394 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
396 pa_assert(err
!= -EAGAIN
);
399 pa_log_debug("%s: Buffer underrun!", call
);
401 if (err
== -ESTRPIPE
)
402 pa_log_debug("%s: System suspended!", call
);
404 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
405 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
414 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
416 pa_bool_t underrun
= FALSE
;
418 /* We use <= instead of < for this check here because an underrun
419 * only happens after the last sample was processed, not already when
420 * it is removed from the buffer. This is particularly important
421 * when block transfer is used. */
423 if (n_bytes
<= u
->hwbuf_size
)
424 left_to_play
= u
->hwbuf_size
- n_bytes
;
427 /* We got a dropout. What a mess! */
435 if (!u
->first
&& !u
->after_rewind
)
436 if (pa_log_ratelimit())
437 pa_log_info("Underrun!");
441 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
442 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
443 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
444 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
448 pa_bool_t reset_not_before
= TRUE
;
450 if (!u
->first
&& !u
->after_rewind
) {
451 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
452 increase_watermark(u
);
453 else if (left_to_play
> u
->watermark_dec_threshold
) {
454 reset_not_before
= FALSE
;
456 /* We decrease the watermark only if have actually
457 * been woken up by a timeout. If something else woke
458 * us up it's too easy to fulfill the deadlines... */
461 decrease_watermark(u
);
465 if (reset_not_before
)
466 u
->watermark_dec_not_before
= 0;
472 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
473 pa_bool_t work_done
= TRUE
;
474 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
479 pa_sink_assert_ref(u
->sink
);
482 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
488 pa_bool_t after_avail
= TRUE
;
490 /* First we determine how many samples are missing to fill the
491 * buffer up to 100% */
493 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
495 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
501 n_bytes
= (size_t) n
* u
->frame_size
;
504 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
507 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
512 /* We won't fill up the playback buffer before at least
513 * half the sleep time is over because otherwise we might
514 * ask for more data from the clients then they expect. We
515 * need to guarantee that clients only have to keep around
516 * a single hw buffer length. */
519 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
521 pa_log_debug("Not filling up, because too early.");
526 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
530 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
531 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
532 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
533 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
539 pa_log_debug("Not filling up, because not necessary.");
547 pa_log_debug("Not filling up, because already too many iterations.");
553 n_bytes
-= u
->hwbuf_unused
;
557 pa_log_debug("Filling up");
564 const snd_pcm_channel_area_t
*areas
;
565 snd_pcm_uframes_t offset
, frames
;
566 snd_pcm_sframes_t sframes
;
568 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
569 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
571 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
573 if (!after_avail
&& err
== -EAGAIN
)
576 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
582 /* Make sure that if these memblocks need to be copied they will fit into one slot */
583 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
584 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
586 if (!after_avail
&& frames
== 0)
589 pa_assert(frames
> 0);
592 /* Check these are multiples of 8 bit */
593 pa_assert((areas
[0].first
& 7) == 0);
594 pa_assert((areas
[0].step
& 7)== 0);
596 /* We assume a single interleaved memory buffer */
597 pa_assert((areas
[0].first
>> 3) == 0);
598 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
600 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
602 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
603 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
606 pa_sink_render_into_full(u
->sink
, &chunk
);
607 pa_memblock_unref_fixed(chunk
.memblock
);
609 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
611 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
619 u
->write_count
+= frames
* u
->frame_size
;
620 u
->since_start
+= frames
* u
->frame_size
;
623 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
626 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
629 n_bytes
-= (size_t) frames
* u
->frame_size
;
633 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
635 if (*sleep_usec
> process_usec
)
636 *sleep_usec
-= process_usec
;
640 return work_done
? 1 : 0;
643 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
644 pa_bool_t work_done
= FALSE
;
645 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
650 pa_sink_assert_ref(u
->sink
);
653 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
660 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
662 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
668 n_bytes
= (size_t) n
* u
->frame_size
;
669 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
674 /* We won't fill up the playback buffer before at least
675 * half the sleep time is over because otherwise we might
676 * ask for more data from the clients then they expect. We
677 * need to guarantee that clients only have to keep around
678 * a single hw buffer length. */
681 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
684 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
688 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
689 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
690 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
691 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
701 pa_log_debug("Not filling up, because already too many iterations.");
707 n_bytes
-= u
->hwbuf_unused
;
711 snd_pcm_sframes_t frames
;
713 pa_bool_t after_avail
= TRUE
;
715 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
717 if (u
->memchunk
.length
<= 0)
718 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
720 pa_assert(u
->memchunk
.length
> 0);
722 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
724 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
725 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
727 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
728 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
729 pa_memblock_release(u
->memchunk
.memblock
);
731 if (PA_UNLIKELY(frames
< 0)) {
733 if (!after_avail
&& (int) frames
== -EAGAIN
)
736 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
742 if (!after_avail
&& frames
== 0)
745 pa_assert(frames
> 0);
748 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
749 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
751 if (u
->memchunk
.length
<= 0) {
752 pa_memblock_unref(u
->memchunk
.memblock
);
753 pa_memchunk_reset(&u
->memchunk
);
758 u
->write_count
+= frames
* u
->frame_size
;
759 u
->since_start
+= frames
* u
->frame_size
;
761 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
763 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
766 n_bytes
-= (size_t) frames
* u
->frame_size
;
770 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
772 if (*sleep_usec
> process_usec
)
773 *sleep_usec
-= process_usec
;
777 return work_done
? 1 : 0;
780 static void update_smoother(struct userdata
*u
) {
781 snd_pcm_sframes_t delay
= 0;
784 pa_usec_t now1
= 0, now2
;
785 snd_pcm_status_t
*status
;
787 snd_pcm_status_alloca(&status
);
790 pa_assert(u
->pcm_handle
);
792 /* Let's update the time smoother */
794 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
795 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
799 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
800 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
802 snd_htimestamp_t htstamp
= { 0, 0 };
803 snd_pcm_status_get_htstamp(status
, &htstamp
);
804 now1
= pa_timespec_load(&htstamp
);
807 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
809 now1
= pa_rtclock_now();
811 /* check if the time since the last update is bigger than the interval */
812 if (u
->last_smoother_update
> 0)
813 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
816 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
818 if (PA_UNLIKELY(position
< 0))
821 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
823 pa_smoother_put(u
->smoother
, now1
, now2
);
825 u
->last_smoother_update
= now1
;
826 /* exponentially increase the update interval up to the MAX limit */
827 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
830 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
833 pa_usec_t now1
, now2
;
837 now1
= pa_rtclock_now();
838 now2
= pa_smoother_get(u
->smoother
, now1
);
840 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
842 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
844 if (u
->memchunk
.memblock
)
845 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
850 static int build_pollfd(struct userdata
*u
) {
852 pa_assert(u
->pcm_handle
);
854 if (u
->alsa_rtpoll_item
)
855 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
857 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
863 /* Called from IO context */
864 static int suspend(struct userdata
*u
) {
866 pa_assert(u
->pcm_handle
);
868 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
870 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
871 * take awfully long with our long buffer sizes today. */
872 snd_pcm_close(u
->pcm_handle
);
873 u
->pcm_handle
= NULL
;
875 if (u
->alsa_rtpoll_item
) {
876 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
877 u
->alsa_rtpoll_item
= NULL
;
880 pa_log_info("Device suspended...");
885 /* Called from IO context */
886 static int update_sw_params(struct userdata
*u
) {
887 snd_pcm_uframes_t avail_min
;
892 /* Use the full buffer if noone asked us for anything specific */
898 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
901 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
903 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
905 /* We need at least one sample in our buffer */
907 if (PA_UNLIKELY(b
< u
->frame_size
))
910 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
913 fix_min_sleep_wakeup(u
);
914 fix_tsched_watermark(u
);
917 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
919 /* We need at last one frame in the used part of the buffer */
920 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
923 pa_usec_t sleep_usec
, process_usec
;
925 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
926 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
929 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
931 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
932 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
936 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
941 /* Called from IO context */
942 static int unsuspend(struct userdata
*u
) {
947 snd_pcm_uframes_t period_size
;
950 pa_assert(!u
->pcm_handle
);
952 pa_log_info("Trying resume...");
954 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
955 /*SND_PCM_NONBLOCK|*/
956 SND_PCM_NO_AUTO_RESAMPLE
|
957 SND_PCM_NO_AUTO_CHANNELS
|
958 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
959 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
963 ss
= u
->sink
->sample_spec
;
964 nfrags
= u
->nfragments
;
965 period_size
= u
->fragment_size
/ u
->frame_size
;
969 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
970 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
974 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
975 pa_log_warn("Resume failed, couldn't get original access mode.");
979 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
980 pa_log_warn("Resume failed, couldn't restore original sample settings.");
984 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
985 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
986 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
987 (unsigned long) nfrags
, period_size
* u
->frame_size
);
991 if (update_sw_params(u
) < 0)
994 if (build_pollfd(u
) < 0)
998 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
999 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1000 u
->last_smoother_update
= 0;
1005 pa_log_info("Resumed successfully...");
1010 if (u
->pcm_handle
) {
1011 snd_pcm_close(u
->pcm_handle
);
1012 u
->pcm_handle
= NULL
;
1018 /* Called from IO context */
1019 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1020 struct userdata
*u
= PA_SINK(o
)->userdata
;
1024 case PA_SINK_MESSAGE_GET_LATENCY
: {
1028 r
= sink_get_latency(u
);
1030 *((pa_usec_t
*) data
) = r
;
1035 case PA_SINK_MESSAGE_SET_STATE
:
1037 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1039 case PA_SINK_SUSPENDED
:
1040 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1048 case PA_SINK_RUNNING
:
1050 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1051 if (build_pollfd(u
) < 0)
1055 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1056 if (unsuspend(u
) < 0)
1062 case PA_SINK_UNLINKED
:
1064 case PA_SINK_INVALID_STATE
:
1071 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1074 /* Called from main context */
1075 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1076 pa_sink_state_t old_state
;
1079 pa_sink_assert_ref(s
);
1080 pa_assert_se(u
= s
->userdata
);
1082 old_state
= pa_sink_get_state(u
->sink
);
1084 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1086 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1087 if (reserve_init(u
, u
->device_name
) < 0)
1093 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1094 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1097 pa_assert(u
->mixer_handle
);
1099 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1102 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1103 pa_sink_get_volume(u
->sink
, TRUE
);
1104 pa_sink_get_mute(u
->sink
, TRUE
);
1110 static void sink_get_volume_cb(pa_sink
*s
) {
1111 struct userdata
*u
= s
->userdata
;
1113 char t
[PA_CVOLUME_SNPRINT_MAX
];
1116 pa_assert(u
->mixer_path
);
1117 pa_assert(u
->mixer_handle
);
1119 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1122 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1123 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1125 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1127 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1130 s
->real_volume
= u
->hardware_volume
= r
;
1132 /* Hmm, so the hardware volume changed, let's reset our software volume */
1133 if (u
->mixer_path
->has_dB
)
1134 pa_sink_set_soft_volume(s
, NULL
);
1137 static void sink_set_volume_cb(pa_sink
*s
) {
1138 struct userdata
*u
= s
->userdata
;
1140 char t
[PA_CVOLUME_SNPRINT_MAX
];
1143 pa_assert(u
->mixer_path
);
1144 pa_assert(u
->mixer_handle
);
1146 /* Shift up by the base volume */
1147 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1149 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1152 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1153 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1155 u
->hardware_volume
= r
;
1157 if (u
->mixer_path
->has_dB
) {
1158 pa_cvolume new_soft_volume
;
1159 pa_bool_t accurate_enough
;
1161 /* Match exactly what the user requested by software */
1162 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1164 /* If the adjustment to do in software is only minimal we
1165 * can skip it. That saves us CPU at the expense of a bit of
1168 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1169 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1171 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->real_volume
));
1172 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1173 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t
, sizeof(t
), &new_soft_volume
),
1174 pa_yes_no(accurate_enough
));
1176 if (!accurate_enough
)
1177 s
->soft_volume
= new_soft_volume
;
1180 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1182 /* We can't match exactly what the user requested, hence let's
1183 * at least tell the user about it */
1189 static void sink_get_mute_cb(pa_sink
*s
) {
1190 struct userdata
*u
= s
->userdata
;
1194 pa_assert(u
->mixer_path
);
1195 pa_assert(u
->mixer_handle
);
1197 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1203 static void sink_set_mute_cb(pa_sink
*s
) {
1204 struct userdata
*u
= s
->userdata
;
1207 pa_assert(u
->mixer_path
);
1208 pa_assert(u
->mixer_handle
);
1210 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1213 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1214 struct userdata
*u
= s
->userdata
;
1215 pa_alsa_port_data
*data
;
1219 pa_assert(u
->mixer_handle
);
1221 data
= PA_DEVICE_PORT_DATA(p
);
1223 pa_assert_se(u
->mixer_path
= data
->path
);
1224 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1226 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1227 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1228 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1230 if (u
->mixer_path
->max_dB
> 0.0)
1231 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1233 pa_log_info("No particular base volume set, fixing to 0 dB");
1235 s
->base_volume
= PA_VOLUME_NORM
;
1236 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1240 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1250 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1251 struct userdata
*u
= s
->userdata
;
1258 before
= u
->hwbuf_unused
;
1259 update_sw_params(u
);
1261 /* Let's check whether we now use only a smaller part of the
1262 buffer then before. If so, we need to make sure that subsequent
1263 rewinds are relative to the new maximum fill level and not to the
1264 current fill level. Thus, let's do a full rewind once, to clear
1267 if (u
->hwbuf_unused
> before
) {
1268 pa_log_debug("Requesting rewind due to latency change.");
1269 pa_sink_request_rewind(s
, (size_t) -1);
1273 static int process_rewind(struct userdata
*u
) {
1274 snd_pcm_sframes_t unused
;
1275 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1278 /* Figure out how much we shall rewind and reset the counter */
1279 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1281 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1283 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1284 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1288 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1290 if (u
->hwbuf_size
> unused_nbytes
)
1291 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1295 if (rewind_nbytes
> limit_nbytes
)
1296 rewind_nbytes
= limit_nbytes
;
1298 if (rewind_nbytes
> 0) {
1299 snd_pcm_sframes_t in_frames
, out_frames
;
1301 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1303 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1304 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1305 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1306 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1307 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1312 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1314 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1316 if (rewind_nbytes
<= 0)
1317 pa_log_info("Tried rewind, but was apparently not possible.");
1319 u
->write_count
-= rewind_nbytes
;
1320 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1321 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1323 u
->after_rewind
= TRUE
;
1327 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1329 pa_sink_process_rewind(u
->sink
, 0);
1333 static void thread_func(void *userdata
) {
1334 struct userdata
*u
= userdata
;
1335 unsigned short revents
= 0;
1339 pa_log_debug("Thread starting up");
1341 if (u
->core
->realtime_scheduling
)
1342 pa_make_realtime(u
->core
->realtime_priority
);
1344 pa_thread_mq_install(&u
->thread_mq
);
1350 pa_log_debug("Loop");
1353 /* Render some data and write it to the dsp */
1354 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1356 pa_usec_t sleep_usec
= 0;
1357 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
1359 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1360 if (process_rewind(u
) < 0)
1364 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1366 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1371 /* pa_log_debug("work_done = %i", work_done); */
1376 pa_log_info("Starting playback.");
1377 snd_pcm_start(u
->pcm_handle
);
1379 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1385 if (u
->use_tsched
) {
1388 if (u
->since_start
<= u
->hwbuf_size
) {
1390 /* USB devices on ALSA seem to hit a buffer
1391 * underrun during the first iterations much
1392 * quicker then we calculate here, probably due to
1393 * the transport latency. To accommodate for that
1394 * we artificially decrease the sleep time until
1395 * we have filled the buffer at least once
1398 if (pa_log_ratelimit())
1399 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1403 /* OK, the playback buffer is now full, let's
1404 * calculate when to wake up next */
1405 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1407 /* Convert from the sound card time domain to the
1408 * system time domain */
1409 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1411 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1413 /* We don't trust the conversion, so we wake up whatever comes first */
1414 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1418 u
->after_rewind
= FALSE
;
1420 } else if (u
->use_tsched
)
1422 /* OK, we're in an invalid state, let's disable our timers */
1423 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1425 /* Hmm, nothing to do. Let's sleep */
1426 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1432 /* Tell ALSA about this and process its response */
1433 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1434 struct pollfd
*pollfd
;
1438 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1440 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1441 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1445 if (revents
& ~POLLOUT
) {
1446 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1451 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1452 pa_log_debug("Wakeup from ALSA!");
1459 /* If this was no regular exit from the loop we have to continue
1460 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1461 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1462 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1465 pa_log_debug("Thread shutting down");
1468 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1474 pa_assert(device_name
);
1476 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1477 pa_sink_new_data_set_name(data
, n
);
1478 data
->namereg_fail
= TRUE
;
1482 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1483 data
->namereg_fail
= TRUE
;
1485 n
= device_id
? device_id
: device_name
;
1486 data
->namereg_fail
= FALSE
;
1490 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1492 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1494 pa_sink_new_data_set_name(data
, t
);
1498 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1500 if (!mapping
&& !element
)
1503 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1504 pa_log_info("Failed to find a working mixer device.");
1510 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1513 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1516 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1517 pa_alsa_path_dump(u
->mixer_path
);
1520 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1523 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1525 pa_log_debug("Probed mixer paths:");
1526 pa_alsa_path_set_dump(u
->mixer_path_set
);
1533 if (u
->mixer_path_set
) {
1534 pa_alsa_path_set_free(u
->mixer_path_set
);
1535 u
->mixer_path_set
= NULL
;
1536 } else if (u
->mixer_path
) {
1537 pa_alsa_path_free(u
->mixer_path
);
1538 u
->mixer_path
= NULL
;
1541 if (u
->mixer_handle
) {
1542 snd_mixer_close(u
->mixer_handle
);
1543 u
->mixer_handle
= NULL
;
1547 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1550 if (!u
->mixer_handle
)
1553 if (u
->sink
->active_port
) {
1554 pa_alsa_port_data
*data
;
1556 /* We have a list of supported paths, so let's activate the
1557 * one that has been chosen as active */
1559 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1560 u
->mixer_path
= data
->path
;
1562 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1565 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1569 if (!u
->mixer_path
&& u
->mixer_path_set
)
1570 u
->mixer_path
= u
->mixer_path_set
->paths
;
1572 if (u
->mixer_path
) {
1573 /* Hmm, we have only a single path, then let's activate it */
1575 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1577 if (u
->mixer_path
->settings
)
1578 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1583 if (!u
->mixer_path
->has_volume
)
1584 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1587 if (u
->mixer_path
->has_dB
) {
1588 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1590 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1591 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1593 if (u
->mixer_path
->max_dB
> 0.0)
1594 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1596 pa_log_info("No particular base volume set, fixing to 0 dB");
1599 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1600 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1601 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1604 u
->sink
->get_volume
= sink_get_volume_cb
;
1605 u
->sink
->set_volume
= sink_set_volume_cb
;
1607 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->mixer_path
->has_dB
? PA_SINK_DECIBEL_VOLUME
: 0);
1608 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1611 if (!u
->mixer_path
->has_mute
) {
1612 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1614 u
->sink
->get_mute
= sink_get_mute_cb
;
1615 u
->sink
->set_mute
= sink_set_mute_cb
;
1616 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1617 pa_log_info("Using hardware mute control.");
1620 u
->mixer_fdl
= pa_alsa_fdlist_new();
1622 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1623 pa_log("Failed to initialize file descriptor monitoring");
1627 if (u
->mixer_path_set
)
1628 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1630 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1635 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1637 struct userdata
*u
= NULL
;
1638 const char *dev_id
= NULL
;
1639 pa_sample_spec ss
, requested_ss
;
1641 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1642 snd_pcm_uframes_t period_frames
, tsched_frames
;
1644 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1645 pa_sink_new_data data
;
1646 pa_alsa_profile_set
*profile_set
= NULL
;
1651 ss
= m
->core
->default_sample_spec
;
1652 map
= m
->core
->default_channel_map
;
1653 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1654 pa_log("Failed to parse sample specification and channel map");
1659 frame_size
= pa_frame_size(&ss
);
1661 nfrags
= m
->core
->default_n_fragments
;
1662 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1664 frag_size
= (uint32_t) frame_size
;
1665 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1666 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1668 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1669 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1670 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1671 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1672 pa_log("Failed to parse buffer metrics");
1676 hwbuf_size
= frag_size
* nfrags
;
1677 period_frames
= frag_size
/frame_size
;
1678 tsched_frames
= tsched_size
/frame_size
;
1680 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1681 pa_log("Failed to parse mmap argument.");
1685 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1686 pa_log("Failed to parse tsched argument.");
1690 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1691 pa_log("Failed to parse ignore_dB argument.");
1695 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1696 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1700 u
= pa_xnew0(struct userdata
, 1);
1703 u
->use_mmap
= use_mmap
;
1704 u
->use_tsched
= use_tsched
;
1706 u
->rtpoll
= pa_rtpoll_new();
1707 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1709 u
->smoother
= pa_smoother_new(
1710 DEFAULT_TSCHED_BUFFER_USEC
*2,
1711 DEFAULT_TSCHED_BUFFER_USEC
*2,
1717 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1719 dev_id
= pa_modargs_get_value(
1721 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1723 if (reserve_init(u
, dev_id
) < 0)
1726 if (reserve_monitor_init(u
, dev_id
) < 0)
1734 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1735 pa_log("device_id= not set");
1739 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1743 SND_PCM_STREAM_PLAYBACK
,
1744 &nfrags
, &period_frames
, tsched_frames
,
1749 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1751 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1754 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1758 SND_PCM_STREAM_PLAYBACK
,
1759 &nfrags
, &period_frames
, tsched_frames
,
1760 &b
, &d
, profile_set
, &mapping
)))
1766 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1767 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1770 SND_PCM_STREAM_PLAYBACK
,
1771 &nfrags
, &period_frames
, tsched_frames
,
1776 pa_assert(u
->device_name
);
1777 pa_log_info("Successfully opened device %s.", u
->device_name
);
1779 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1780 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1785 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
1787 if (use_mmap
&& !b
) {
1788 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1789 u
->use_mmap
= use_mmap
= FALSE
;
1792 if (use_tsched
&& (!b
|| !d
)) {
1793 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1794 u
->use_tsched
= use_tsched
= FALSE
;
1797 if (use_tsched
&& !pa_alsa_pcm_is_hw(u
->pcm_handle
)) {
1798 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1799 u
->use_tsched
= use_tsched
= FALSE
;
1803 pa_log_info("Successfully enabled mmap() mode.");
1806 pa_log_info("Successfully enabled timer-based scheduling mode.");
1808 /* ALSA might tweak the sample spec, so recalculate the frame size */
1809 frame_size
= pa_frame_size(&ss
);
1811 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
1813 pa_sink_new_data_init(&data
);
1814 data
.driver
= driver
;
1817 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1818 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1819 pa_sink_new_data_set_channel_map(&data
, &map
);
1821 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1822 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1823 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1824 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1825 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1828 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1829 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1832 pa_alsa_init_description(data
.proplist
);
1834 if (u
->control_device
)
1835 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1837 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1838 pa_log("Invalid properties");
1839 pa_sink_new_data_done(&data
);
1843 if (u
->mixer_path_set
)
1844 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1846 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1847 pa_sink_new_data_done(&data
);
1850 pa_log("Failed to create sink object");
1854 u
->sink
->parent
.process_msg
= sink_process_msg
;
1855 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1856 u
->sink
->set_state
= sink_set_state_cb
;
1857 u
->sink
->set_port
= sink_set_port_cb
;
1858 u
->sink
->userdata
= u
;
1860 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1861 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1863 u
->frame_size
= frame_size
;
1864 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1865 u
->nfragments
= nfrags
;
1866 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1867 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1869 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1870 nfrags
, (long unsigned) u
->fragment_size
,
1871 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1873 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
1874 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
1876 if (u
->use_tsched
) {
1877 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1879 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
1880 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
1882 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1883 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
1885 fix_min_sleep_wakeup(u
);
1886 fix_tsched_watermark(u
);
1888 pa_sink_set_latency_range(u
->sink
,
1890 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1892 pa_log_info("Time scheduling watermark is %0.2fms",
1893 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1895 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1900 if (update_sw_params(u
) < 0)
1903 if (setup_mixer(u
, ignore_dB
) < 0)
1906 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1908 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1909 pa_log("Failed to create thread.");
1913 /* Get initial mixer settings */
1914 if (data
.volume_is_set
) {
1915 if (u
->sink
->set_volume
)
1916 u
->sink
->set_volume(u
->sink
);
1918 if (u
->sink
->get_volume
)
1919 u
->sink
->get_volume(u
->sink
);
1922 if (data
.muted_is_set
) {
1923 if (u
->sink
->set_mute
)
1924 u
->sink
->set_mute(u
->sink
);
1926 if (u
->sink
->get_mute
)
1927 u
->sink
->get_mute(u
->sink
);
1930 pa_sink_put(u
->sink
);
1933 pa_alsa_profile_set_free(profile_set
);
1943 pa_alsa_profile_set_free(profile_set
);
1948 static void userdata_free(struct userdata
*u
) {
1952 pa_sink_unlink(u
->sink
);
1955 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1956 pa_thread_free(u
->thread
);
1959 pa_thread_mq_done(&u
->thread_mq
);
1962 pa_sink_unref(u
->sink
);
1964 if (u
->memchunk
.memblock
)
1965 pa_memblock_unref(u
->memchunk
.memblock
);
1967 if (u
->alsa_rtpoll_item
)
1968 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1971 pa_rtpoll_free(u
->rtpoll
);
1973 if (u
->pcm_handle
) {
1974 snd_pcm_drop(u
->pcm_handle
);
1975 snd_pcm_close(u
->pcm_handle
);
1979 pa_alsa_fdlist_free(u
->mixer_fdl
);
1981 if (u
->mixer_path_set
)
1982 pa_alsa_path_set_free(u
->mixer_path_set
);
1983 else if (u
->mixer_path
)
1984 pa_alsa_path_free(u
->mixer_path
);
1986 if (u
->mixer_handle
)
1987 snd_mixer_close(u
->mixer_handle
);
1990 pa_smoother_free(u
->smoother
);
1995 pa_xfree(u
->device_name
);
1996 pa_xfree(u
->control_device
);
2000 void pa_alsa_sink_free(pa_sink
*s
) {
2003 pa_sink_assert_ref(s
);
2004 pa_assert_se(u
= s
->userdata
);