2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
71 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
72 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
74 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
82 pa_thread_mq thread_mq
;
85 snd_pcm_t
*pcm_handle
;
87 pa_alsa_fdlist
*mixer_fdl
;
88 snd_mixer_t
*mixer_handle
;
89 pa_alsa_path_set
*mixer_path_set
;
90 pa_alsa_path
*mixer_path
;
92 pa_cvolume hardware_volume
;
105 pa_memchunk memchunk
;
107 char *device_name
; /* name of the PCM device */
108 char *control_device
; /* name of the control device */
110 pa_bool_t use_mmap
:1, use_tsched
:1;
112 pa_bool_t first
, after_rewind
;
114 pa_rtpoll_item
*alsa_rtpoll_item
;
116 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
118 pa_smoother
*smoother
;
119 uint64_t write_count
;
120 uint64_t since_start
;
121 pa_usec_t smoother_interval
;
122 pa_usec_t last_smoother_update
;
124 pa_reserve_wrapper
*reserve
;
125 pa_hook_slot
*reserve_slot
;
126 pa_reserve_monitor_wrapper
*monitor
;
127 pa_hook_slot
*monitor_slot
;
130 static void userdata_free(struct userdata
*u
);
132 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
136 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
137 return PA_HOOK_CANCEL
;
142 static void reserve_done(struct userdata
*u
) {
145 if (u
->reserve_slot
) {
146 pa_hook_slot_free(u
->reserve_slot
);
147 u
->reserve_slot
= NULL
;
151 pa_reserve_wrapper_unref(u
->reserve
);
156 static void reserve_update(struct userdata
*u
) {
157 const char *description
;
160 if (!u
->sink
|| !u
->reserve
)
163 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
164 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
167 static int reserve_init(struct userdata
*u
, const char *dname
) {
176 if (pa_in_system_mode())
179 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
182 /* We are resuming, try to lock the device */
183 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
191 pa_assert(!u
->reserve_slot
);
192 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
197 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
203 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
205 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
209 static void monitor_done(struct userdata
*u
) {
212 if (u
->monitor_slot
) {
213 pa_hook_slot_free(u
->monitor_slot
);
214 u
->monitor_slot
= NULL
;
218 pa_reserve_monitor_wrapper_unref(u
->monitor
);
223 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
229 if (pa_in_system_mode())
232 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
235 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
241 pa_assert(!u
->monitor_slot
);
242 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
247 static void fix_min_sleep_wakeup(struct userdata
*u
) {
248 size_t max_use
, max_use_2
;
252 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
253 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
255 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
256 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
258 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
259 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
262 static void fix_tsched_watermark(struct userdata
*u
) {
266 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
268 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
269 u
->tsched_watermark
= max_use
- u
->min_sleep
;
271 if (u
->tsched_watermark
< u
->min_wakeup
)
272 u
->tsched_watermark
= u
->min_wakeup
;
275 static void adjust_after_underrun(struct userdata
*u
) {
276 size_t old_watermark
;
277 pa_usec_t old_min_latency
, new_min_latency
;
280 pa_assert(u
->use_tsched
);
282 /* First, just try to increase the watermark */
283 old_watermark
= u
->tsched_watermark
;
284 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_step
);
285 fix_tsched_watermark(u
);
287 if (old_watermark
!= u
->tsched_watermark
) {
288 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
289 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
293 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
294 old_min_latency
= u
->sink
->thread_info
.min_latency
;
295 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_STEP_USEC
);
296 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
298 if (old_min_latency
!= new_min_latency
) {
299 pa_log_notice("Increasing minimal latency to %0.2f ms",
300 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
302 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
306 /* When we reach this we're officialy fucked! */
309 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
312 pa_assert(sleep_usec
);
313 pa_assert(process_usec
);
317 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
319 if (usec
== (pa_usec_t
) -1)
320 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
322 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
327 *sleep_usec
= usec
- wm
;
331 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
332 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
333 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
334 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
338 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
343 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
345 pa_assert(err
!= -EAGAIN
);
348 pa_log_debug("%s: Buffer underrun!", call
);
350 if (err
== -ESTRPIPE
)
351 pa_log_debug("%s: System suspended!", call
);
353 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
354 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
363 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
) {
366 /* We use <= instead of < for this check here because an underrun
367 * only happens after the last sample was processed, not already when
368 * it is removed from the buffer. This is particularly important
369 * when block transfer is used. */
371 if (n_bytes
<= u
->hwbuf_size
) {
372 left_to_play
= u
->hwbuf_size
- n_bytes
;
375 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
385 if (!u
->first
&& !u
->after_rewind
) {
387 if (pa_log_ratelimit())
388 pa_log_info("Underrun!");
391 adjust_after_underrun(u
);
398 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
399 pa_bool_t work_done
= TRUE
;
400 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
405 pa_sink_assert_ref(u
->sink
);
408 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
414 pa_bool_t after_avail
= TRUE
;
416 /* First we determine how many samples are missing to fill the
417 * buffer up to 100% */
419 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
421 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
427 n_bytes
= (size_t) n
* u
->frame_size
;
430 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
433 left_to_play
= check_left_to_play(u
, n_bytes
);
437 /* We won't fill up the playback buffer before at least
438 * half the sleep time is over because otherwise we might
439 * ask for more data from the clients then they expect. We
440 * need to guarantee that clients only have to keep around
441 * a single hw buffer length. */
444 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
446 pa_log_debug("Not filling up, because too early.");
451 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
455 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
456 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
457 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
458 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
464 pa_log_debug("Not filling up, because not necessary.");
472 pa_log_debug("Not filling up, because already too many iterations.");
478 n_bytes
-= u
->hwbuf_unused
;
482 pa_log_debug("Filling up");
489 const snd_pcm_channel_area_t
*areas
;
490 snd_pcm_uframes_t offset
, frames
;
491 snd_pcm_sframes_t sframes
;
493 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
494 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
496 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
498 if (!after_avail
&& err
== -EAGAIN
)
501 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
507 /* Make sure that if these memblocks need to be copied they will fit into one slot */
508 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
509 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
511 if (!after_avail
&& frames
== 0)
514 pa_assert(frames
> 0);
517 /* Check these are multiples of 8 bit */
518 pa_assert((areas
[0].first
& 7) == 0);
519 pa_assert((areas
[0].step
& 7)== 0);
521 /* We assume a single interleaved memory buffer */
522 pa_assert((areas
[0].first
>> 3) == 0);
523 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
525 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
527 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
528 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
531 pa_sink_render_into_full(u
->sink
, &chunk
);
532 pa_memblock_unref_fixed(chunk
.memblock
);
534 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
536 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
544 u
->write_count
+= frames
* u
->frame_size
;
545 u
->since_start
+= frames
* u
->frame_size
;
548 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
551 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
554 n_bytes
-= (size_t) frames
* u
->frame_size
;
558 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
560 if (*sleep_usec
> process_usec
)
561 *sleep_usec
-= process_usec
;
565 return work_done
? 1 : 0;
568 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
569 pa_bool_t work_done
= FALSE
;
570 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
575 pa_sink_assert_ref(u
->sink
);
578 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
585 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
587 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
593 n_bytes
= (size_t) n
* u
->frame_size
;
594 left_to_play
= check_left_to_play(u
, n_bytes
);
598 /* We won't fill up the playback buffer before at least
599 * half the sleep time is over because otherwise we might
600 * ask for more data from the clients then they expect. We
601 * need to guarantee that clients only have to keep around
602 * a single hw buffer length. */
605 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
608 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
612 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
613 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
614 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
615 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
625 pa_log_debug("Not filling up, because already too many iterations.");
631 n_bytes
-= u
->hwbuf_unused
;
635 snd_pcm_sframes_t frames
;
637 pa_bool_t after_avail
= TRUE
;
639 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
641 if (u
->memchunk
.length
<= 0)
642 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
644 pa_assert(u
->memchunk
.length
> 0);
646 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
648 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
649 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
651 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
652 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
653 pa_memblock_release(u
->memchunk
.memblock
);
655 if (PA_UNLIKELY(frames
< 0)) {
657 if (!after_avail
&& (int) frames
== -EAGAIN
)
660 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
666 if (!after_avail
&& frames
== 0)
669 pa_assert(frames
> 0);
672 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
673 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
675 if (u
->memchunk
.length
<= 0) {
676 pa_memblock_unref(u
->memchunk
.memblock
);
677 pa_memchunk_reset(&u
->memchunk
);
682 u
->write_count
+= frames
* u
->frame_size
;
683 u
->since_start
+= frames
* u
->frame_size
;
685 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
687 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
690 n_bytes
-= (size_t) frames
* u
->frame_size
;
694 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
696 if (*sleep_usec
> process_usec
)
697 *sleep_usec
-= process_usec
;
701 return work_done
? 1 : 0;
704 static void update_smoother(struct userdata
*u
) {
705 snd_pcm_sframes_t delay
= 0;
708 pa_usec_t now1
= 0, now2
;
709 snd_pcm_status_t
*status
;
711 snd_pcm_status_alloca(&status
);
714 pa_assert(u
->pcm_handle
);
716 /* Let's update the time smoother */
718 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
719 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
723 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
724 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
726 snd_htimestamp_t htstamp
= { 0, 0 };
727 snd_pcm_status_get_htstamp(status
, &htstamp
);
728 now1
= pa_timespec_load(&htstamp
);
731 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
733 now1
= pa_rtclock_now();
735 /* check if the time since the last update is bigger than the interval */
736 if (u
->last_smoother_update
> 0) {
737 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
741 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
743 if (PA_UNLIKELY(position
< 0))
746 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
748 u
->last_smoother_update
= now1
;
749 /* exponentially increase the update interval up to the MAX limit */
750 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
752 pa_smoother_put(u
->smoother
, now1
, now2
);
755 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
758 pa_usec_t now1
, now2
;
762 now1
= pa_rtclock_now();
763 now2
= pa_smoother_get(u
->smoother
, now1
);
765 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
767 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
769 if (u
->memchunk
.memblock
)
770 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
775 static int build_pollfd(struct userdata
*u
) {
777 pa_assert(u
->pcm_handle
);
779 if (u
->alsa_rtpoll_item
)
780 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
782 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
788 /* Called from IO context */
789 static int suspend(struct userdata
*u
) {
791 pa_assert(u
->pcm_handle
);
793 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
795 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
796 * take awfully long with our long buffer sizes today. */
797 snd_pcm_close(u
->pcm_handle
);
798 u
->pcm_handle
= NULL
;
800 if (u
->alsa_rtpoll_item
) {
801 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
802 u
->alsa_rtpoll_item
= NULL
;
805 pa_log_info("Device suspended...");
810 /* Called from IO context */
811 static int update_sw_params(struct userdata
*u
) {
812 snd_pcm_uframes_t avail_min
;
817 /* Use the full buffer if noone asked us for anything specific */
823 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
826 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
828 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
830 /* We need at least one sample in our buffer */
832 if (PA_UNLIKELY(b
< u
->frame_size
))
835 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
838 fix_min_sleep_wakeup(u
);
839 fix_tsched_watermark(u
);
842 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
844 /* We need at last one frame in the used part of the buffer */
845 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
848 pa_usec_t sleep_usec
, process_usec
;
850 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
851 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
854 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
856 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
857 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
861 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
866 /* Called from IO context */
867 static int unsuspend(struct userdata
*u
) {
872 snd_pcm_uframes_t period_size
;
875 pa_assert(!u
->pcm_handle
);
877 pa_log_info("Trying resume...");
879 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
880 /*SND_PCM_NONBLOCK|*/
881 SND_PCM_NO_AUTO_RESAMPLE
|
882 SND_PCM_NO_AUTO_CHANNELS
|
883 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
884 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
888 ss
= u
->sink
->sample_spec
;
889 nfrags
= u
->nfragments
;
890 period_size
= u
->fragment_size
/ u
->frame_size
;
894 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
895 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
899 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
900 pa_log_warn("Resume failed, couldn't get original access mode.");
904 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
905 pa_log_warn("Resume failed, couldn't restore original sample settings.");
909 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
910 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
911 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
912 (unsigned long) nfrags
, period_size
* u
->frame_size
);
916 if (update_sw_params(u
) < 0)
919 if (build_pollfd(u
) < 0)
923 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
924 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
925 u
->last_smoother_update
= 0;
931 pa_log_info("Resumed successfully...");
937 snd_pcm_close(u
->pcm_handle
);
938 u
->pcm_handle
= NULL
;
944 /* Called from IO context */
945 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
946 struct userdata
*u
= PA_SINK(o
)->userdata
;
950 case PA_SINK_MESSAGE_GET_LATENCY
: {
954 r
= sink_get_latency(u
);
956 *((pa_usec_t
*) data
) = r
;
961 case PA_SINK_MESSAGE_SET_STATE
:
963 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
965 case PA_SINK_SUSPENDED
:
966 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
974 case PA_SINK_RUNNING
:
976 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
977 if (build_pollfd(u
) < 0)
981 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
982 if (unsuspend(u
) < 0)
988 case PA_SINK_UNLINKED
:
990 case PA_SINK_INVALID_STATE
:
997 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1000 /* Called from main context */
1001 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1002 pa_sink_state_t old_state
;
1005 pa_sink_assert_ref(s
);
1006 pa_assert_se(u
= s
->userdata
);
1008 old_state
= pa_sink_get_state(u
->sink
);
1010 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1012 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1013 if (reserve_init(u
, u
->device_name
) < 0)
1019 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1020 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1023 pa_assert(u
->mixer_handle
);
1025 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1028 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1029 pa_sink_get_volume(u
->sink
, TRUE
);
1030 pa_sink_get_mute(u
->sink
, TRUE
);
1036 static void sink_get_volume_cb(pa_sink
*s
) {
1037 struct userdata
*u
= s
->userdata
;
1039 char t
[PA_CVOLUME_SNPRINT_MAX
];
1042 pa_assert(u
->mixer_path
);
1043 pa_assert(u
->mixer_handle
);
1045 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1048 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1049 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1051 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1053 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1056 s
->real_volume
= u
->hardware_volume
= r
;
1058 /* Hmm, so the hardware volume changed, let's reset our software volume */
1059 if (u
->mixer_path
->has_dB
)
1060 pa_sink_set_soft_volume(s
, NULL
);
1063 static void sink_set_volume_cb(pa_sink
*s
) {
1064 struct userdata
*u
= s
->userdata
;
1066 char t
[PA_CVOLUME_SNPRINT_MAX
];
1069 pa_assert(u
->mixer_path
);
1070 pa_assert(u
->mixer_handle
);
1072 /* Shift up by the base volume */
1073 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1075 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1078 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1079 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1081 u
->hardware_volume
= r
;
1083 if (u
->mixer_path
->has_dB
) {
1084 pa_cvolume new_soft_volume
;
1085 pa_bool_t accurate_enough
;
1087 /* Match exactly what the user requested by software */
1088 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1090 /* If the adjustment to do in software is only minimal we
1091 * can skip it. That saves us CPU at the expense of a bit of
1094 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1095 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1097 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->real_volume
));
1098 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1099 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t
, sizeof(t
), &new_soft_volume
),
1100 pa_yes_no(accurate_enough
));
1102 if (!accurate_enough
)
1103 s
->soft_volume
= new_soft_volume
;
1106 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1108 /* We can't match exactly what the user requested, hence let's
1109 * at least tell the user about it */
1115 static void sink_get_mute_cb(pa_sink
*s
) {
1116 struct userdata
*u
= s
->userdata
;
1120 pa_assert(u
->mixer_path
);
1121 pa_assert(u
->mixer_handle
);
1123 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1129 static void sink_set_mute_cb(pa_sink
*s
) {
1130 struct userdata
*u
= s
->userdata
;
1133 pa_assert(u
->mixer_path
);
1134 pa_assert(u
->mixer_handle
);
1136 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1139 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1140 struct userdata
*u
= s
->userdata
;
1141 pa_alsa_port_data
*data
;
1145 pa_assert(u
->mixer_handle
);
1147 data
= PA_DEVICE_PORT_DATA(p
);
1149 pa_assert_se(u
->mixer_path
= data
->path
);
1150 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1152 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1153 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1154 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1156 if (u
->mixer_path
->max_dB
> 0.0)
1157 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1159 pa_log_info("No particular base volume set, fixing to 0 dB");
1161 s
->base_volume
= PA_VOLUME_NORM
;
1162 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1166 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1176 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1177 struct userdata
*u
= s
->userdata
;
1184 before
= u
->hwbuf_unused
;
1185 update_sw_params(u
);
1187 /* Let's check whether we now use only a smaller part of the
1188 buffer then before. If so, we need to make sure that subsequent
1189 rewinds are relative to the new maximum fill level and not to the
1190 current fill level. Thus, let's do a full rewind once, to clear
1193 if (u
->hwbuf_unused
> before
) {
1194 pa_log_debug("Requesting rewind due to latency change.");
1195 pa_sink_request_rewind(s
, (size_t) -1);
1199 static int process_rewind(struct userdata
*u
) {
1200 snd_pcm_sframes_t unused
;
1201 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1204 /* Figure out how much we shall rewind and reset the counter */
1205 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1207 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1209 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1210 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1214 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1216 if (u
->hwbuf_size
> unused_nbytes
)
1217 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1221 if (rewind_nbytes
> limit_nbytes
)
1222 rewind_nbytes
= limit_nbytes
;
1224 if (rewind_nbytes
> 0) {
1225 snd_pcm_sframes_t in_frames
, out_frames
;
1227 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1229 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1230 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1231 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1232 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1233 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1238 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1240 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1242 if (rewind_nbytes
<= 0)
1243 pa_log_info("Tried rewind, but was apparently not possible.");
1245 u
->write_count
-= rewind_nbytes
;
1246 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1247 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1249 u
->after_rewind
= TRUE
;
1253 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1255 pa_sink_process_rewind(u
->sink
, 0);
/* Real-time I/O thread: renders audio from the sink and writes it to the
 * ALSA device, alternating between mmap and plain read/write transfer.
 * NOTE(review): this extract has lines dropped (the main for(;;) loop header,
 * `goto fail`/`finish` statements and several closing braces are missing);
 * comments below describe only what is visible here. */
1259 static void thread_func(void *userdata
) {
1260 struct userdata
*u
= userdata
;
/* revents starts at 0 so the first iteration writes without a poll wakeup */
1261 unsigned short revents
= 0;
1265 pa_log_debug("Thread starting up");
/* Elevate to RT priority if the core was configured for it */
1267 if (u
->core
->realtime_scheduling
)
1268 pa_make_realtime(u
->core
->realtime_priority
);
1270 pa_thread_mq_install(&u
->thread_mq
);
1276 pa_log_debug("Loop");
1279 /* Render some data and write it to the dsp */
1280 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1282 pa_usec_t sleep_usec
= 0;
/* Honor a rewind requested by the sink core before writing new data */
1284 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1285 if (process_rewind(u
) < 0)
/* Transfer path depends on u->use_mmap; both variants report how long
 * to sleep and whether POLLOUT was signalled for this wakeup.
 * NOTE(review): the `if (u->use_mmap)` / `else` lines are missing from
 * this extract — presumed from the paired mmap_write/unix_write calls. */
1289 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
);
1291 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
);
1296 /* pa_log_debug("work_done = %i", work_done); */
1301 pa_log_info("Starting playback.");
1302 snd_pcm_start(u
->pcm_handle
);
/* Resume the rate smoother now that the PCM clock is running again */
1304 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1310 if (u
->use_tsched
) {
1313 if (u
->since_start
<= u
->hwbuf_size
) {
1315 /* USB devices on ALSA seem to hit a buffer
1316 * underrun during the first iterations much
1317 * quicker then we calculate here, probably due to
1318 * the transport latency. To accommodate for that
1319 * we artificially decrease the sleep time until
1320 * we have filled the buffer at least once
1323 if (pa_log_ratelimit())
1324 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1328 /* OK, the playback buffer is now full, let's
1329 * calculate when to wake up next */
1330 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1332 /* Convert from the sound card time domain to the
1333 * system time domain */
1334 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1336 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1338 /* We don't trust the conversion, so we wake up whatever comes first */
1339 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1343 u
->after_rewind
= FALSE
;
1345 } else if (u
->use_tsched
)
1347 /* OK, we're in an invalid state, let's disable our timers */
1348 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1350 /* Hmm, nothing to do. Let's sleep */
1351 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1357 /* Tell ALSA about this and process its response */
1358 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1359 struct pollfd
*pollfd
;
1363 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
/* Translate raw poll() revents into PCM-level events */
1365 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1366 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
/* Anything besides POLLOUT (e.g. POLLERR on xrun/suspend) needs recovery */
1370 if (revents
& ~POLLOUT
) {
1371 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
/* In tsched mode ALSA wakeups are unexpected — the timer should drive us */
1376 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1377 pa_log_debug("Wakeup from ALSA!");
1384 /* If this was no regular exit from the loop we have to continue
1385 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1386 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1387 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1390 pa_log_debug("Thread shutting down");
/* Choose and register the new sink's name on `data`.
 * Priority (as visible here): explicit "sink_name" modarg, then "name"
 * modarg, then a generated "alsa_output.<id>[.<mapping>]" fallback.
 * namereg_fail=TRUE means a name collision is a hard error (user-chosen
 * name); FALSE lets the name registry mangle the generated fallback.
 * NOTE(review): intermediate `return`s and the declaration of `n`/`t`
 * appear to have been dropped from this extract. */
1393 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1399 pa_assert(device_name
);
/* Case 1: user passed sink_name= explicitly */
1401 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1402 pa_sink_new_data_set_name(data
, n
);
1403 data
->namereg_fail
= TRUE
;
/* Case 2: generic name= modarg */
1407 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1408 data
->namereg_fail
= TRUE
;
/* Case 3: synthesize from device id (preferred) or device name */
1410 n
= device_id
? device_id
: device_name
;
1411 data
->namereg_fail
= FALSE
;
/* With a mapping, include the mapping name for uniqueness across profiles.
 * NOTE(review): the `if (mapping)` / `else` lines are missing here —
 * presumed from the two alternative pa_sprintf_malloc() formats. */
1415 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1417 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1419 pa_sink_new_data_set_name(data
, t
);
/* Locate and probe an ALSA mixer for the opened PCM device.
 * Either synthesizes a single mixer path from an explicit `element`
 * (control= modarg) or builds a path set from the card `mapping`.
 * On any probe failure the partially-built state is torn down again
 * (the cleanup at the bottom presumably sits under a dropped `fail:`
 * label — TODO confirm against the full file). */
1423 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
/* Nothing to do if neither a mapping nor an explicit element was given */
1425 if (!mapping
&& !element
)
1428 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1429 pa_log_info("Failed to find a working mixer device.");
/* Explicit element: build exactly one synthetic output path and probe it */
1435 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1438 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1441 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1442 pa_alsa_path_dump(u
->mixer_path
);
/* Mapping-based: probe the whole candidate path set for this mapping */
1445 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1448 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1450 pa_log_debug("Probed mixer paths:");
1451 pa_alsa_path_set_dump(u
->mixer_path_set
);
/* Failure cleanup: free whichever representation was allocated, then
 * close the mixer handle. Pointers are NULLed so userdata_free() will
 * not double-free them later. */
1458 if (u
->mixer_path_set
) {
1459 pa_alsa_path_set_free(u
->mixer_path_set
);
1460 u
->mixer_path_set
= NULL
;
1461 } else if (u
->mixer_path
) {
1462 pa_alsa_path_free(u
->mixer_path
);
1463 u
->mixer_path
= NULL
;
1466 if (u
->mixer_handle
) {
1467 snd_mixer_close(u
->mixer_handle
);
1468 u
->mixer_handle
= NULL
;
/* Wire the probed mixer path into the sink: select the active path,
 * install hardware volume/mute callbacks where the hardware supports
 * them, and start watching the mixer fds on the main loop.
 * Returns 0 on success, negative on failure (visible failure path:
 * fdlist setup). NOTE(review): several `return`/brace lines are
 * dropped from this extract. */
1472 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
/* No mixer was found by find_mixer() — nothing to set up */
1475 if (!u
->mixer_handle
)
1478 if (u
->sink
->active_port
) {
1479 pa_alsa_port_data
*data
;
1481 /* We have a list of supported paths, so let's activate the
1482 * one that has been chosen as active */
1484 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1485 u
->mixer_path
= data
->path
;
1487 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1490 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
/* No active port: fall back to the first path in the probed set */
1494 if (!u
->mixer_path
&& u
->mixer_path_set
)
1495 u
->mixer_path
= u
->mixer_path_set
->paths
;
1497 if (u
->mixer_path
) {
1498 /* Hmm, we have only a single path, then let's activate it */
1500 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1502 if (u
->mixer_path
->settings
)
1503 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
/* --- Volume: prefer hardware control when the path offers it --- */
1508 if (!u
->mixer_path
->has_volume
)
1509 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1512 if (u
->mixer_path
->has_dB
) {
1513 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
/* base_volume is placed so that max_dB maps to PA_VOLUME_NORM (0 dB) */
1515 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1516 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1518 if (u
->mixer_path
->max_dB
> 0.0)
1519 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1521 pa_log_info("No particular base volume set, fixing to 0 dB");
/* No dB info: expose the raw integer control range as volume steps */
1524 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1525 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1526 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1529 u
->sink
->get_volume
= sink_get_volume_cb
;
1530 u
->sink
->set_volume
= sink_set_volume_cb
;
1532 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->mixer_path
->has_dB
? PA_SINK_DECIBEL_VOLUME
: 0);
1533 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
/* --- Mute: same pattern as volume --- */
1536 if (!u
->mixer_path
->has_mute
) {
1537 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1539 u
->sink
->get_mute
= sink_get_mute_cb
;
1540 u
->sink
->set_mute
= sink_set_mute_cb
;
1541 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1542 pa_log_info("Using hardware mute control.");
/* Watch mixer fds on the main loop so external volume changes are seen */
1545 u
->mixer_fdl
= pa_alsa_fdlist_new();
1547 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1548 pa_log("Failed to initialize file descriptor monitoring");
/* Register the change callback on the set (multi-path) or single path */
1552 if (u
->mixer_path_set
)
1553 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1555 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
/* Create a new ALSA sink: parse module arguments, reserve and open the
 * PCM device (by mapping, by device id, or by device string), probe the
 * mixer, construct the pa_sink object, configure buffering/latency and
 * start the real-time I/O thread. Returns the new sink, or (presumably,
 * via dropped `fail:`/`goto` lines — TODO confirm) NULL on error.
 * NOTE(review): this extract is missing many lines (asserts, gotos,
 * labels, some declarations such as `map`, `frame_size`, `err`). */
1560 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1562 struct userdata
*u
= NULL
;
1563 const char *dev_id
= NULL
;
1564 pa_sample_spec ss
, requested_ss
;
1566 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1567 snd_pcm_uframes_t period_frames
, tsched_frames
;
/* b/d are filled by the open helpers: mmap possible / tsched possible */
1569 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1570 pa_sink_new_data data
;
1571 pa_alsa_profile_set
*profile_set
= NULL
;
/* Start from the core defaults, then let modargs override them */
1576 ss
= m
->core
->default_sample_spec
;
1577 map
= m
->core
->default_channel_map
;
1578 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1579 pa_log("Failed to parse sample specification and channel map");
1584 frame_size
= pa_frame_size(&ss
);
/* Derive default buffer metrics from the core configuration */
1586 nfrags
= m
->core
->default_n_fragments
;
1587 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
/* Clamp: a fragment must hold at least one frame (guard line presumably
 * dropped before this assignment — TODO confirm) */
1589 frag_size
= (uint32_t) frame_size
;
1590 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1591 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1593 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1594 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1595 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1596 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1597 pa_log("Failed to parse buffer metrics");
1601 hwbuf_size
= frag_size
* nfrags
;
1602 period_frames
= frag_size
/frame_size
;
1603 tsched_frames
= tsched_size
/frame_size
;
1605 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1606 pa_log("Failed to parse mmap argument.");
1610 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1611 pa_log("Failed to parse tsched argument.");
1615 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1616 pa_log("Failed to parse ignore_dB argument.");
/* Timer-based scheduling needs high-resolution kernel timers */
1620 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1621 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1625 u
= pa_xnew0(struct userdata
, 1);
1628 u
->use_mmap
= use_mmap
;
1629 u
->use_tsched
= use_tsched
;
1631 u
->rtpoll
= pa_rtpoll_new();
1632 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1634 u
->smoother
= pa_smoother_new(
1635 DEFAULT_TSCHED_BUFFER_USEC
*2,
1636 DEFAULT_TSCHED_BUFFER_USEC
*2,
1642 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
/* Device reservation (D-Bus reserve protocol) before opening the PCM */
1644 dev_id
= pa_modargs_get_value(
1646 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1648 if (reserve_init(u
, dev_id
) < 0)
1651 if (reserve_monitor_init(u
, dev_id
) < 0)
/* Open path 1: an explicit mapping was handed in — device_id= required */
1659 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1660 pa_log("device_id= not set");
1664 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1668 SND_PCM_STREAM_PLAYBACK
,
1669 &nfrags
, &period_frames
, tsched_frames
,
/* Open path 2: device_id= given without a mapping — auto-probe profiles */
1674 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1676 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1679 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1683 SND_PCM_STREAM_PLAYBACK
,
1684 &nfrags
, &period_frames
, tsched_frames
,
1685 &b
, &d
, profile_set
, &mapping
)))
/* Open path 3: plain device= string */
1691 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1692 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1695 SND_PCM_STREAM_PLAYBACK
,
1696 &nfrags
, &period_frames
, tsched_frames
,
1701 pa_assert(u
->device_name
);
1702 pa_log_info("Successfully opened device %s.", u
->device_name
);
1704 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1705 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1710 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
/* Downgrade features the opened device turned out not to support */
1712 if (use_mmap
&& !b
) {
1713 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1714 u
->use_mmap
= use_mmap
= FALSE
;
1717 if (use_tsched
&& (!b
|| !d
)) {
1718 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1719 u
->use_tsched
= use_tsched
= FALSE
;
1722 if (use_tsched
&& !pa_alsa_pcm_is_hw(u
->pcm_handle
)) {
1723 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1724 u
->use_tsched
= use_tsched
= FALSE
;
1728 pa_log_info("Successfully enabled mmap() mode.");
1731 pa_log_info("Successfully enabled timer-based scheduling mode.");
1733 /* ALSA might tweak the sample spec, so recalculate the frame size */
1734 frame_size
= pa_frame_size(&ss
);
1736 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
/* Build the pa_sink_new_data descriptor: name, specs, proplist, ports */
1738 pa_sink_new_data_init(&data
);
1739 data
.driver
= driver
;
1742 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1743 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1744 pa_sink_new_data_set_channel_map(&data
, &map
);
1746 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1747 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1748 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1749 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1750 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1753 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1754 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1757 pa_alsa_init_description(data
.proplist
);
1759 if (u
->control_device
)
1760 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1762 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1763 pa_log("Invalid properties");
1764 pa_sink_new_data_done(&data
);
1768 if (u
->mixer_path_set
)
1769 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1771 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1772 pa_sink_new_data_done(&data
);
1775 pa_log("Failed to create sink object");
/* Install the sink's virtual methods and back-pointer */
1779 u
->sink
->parent
.process_msg
= sink_process_msg
;
1780 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1781 u
->sink
->set_state
= sink_set_state_cb
;
1782 u
->sink
->set_port
= sink_set_port_cb
;
1783 u
->sink
->userdata
= u
;
1785 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1786 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
/* Record the final (possibly ALSA-adjusted) buffer geometry */
1788 u
->frame_size
= frame_size
;
1789 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1790 u
->nfragments
= nfrags
;
1791 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
/* Rescale the watermark from the requested spec to the actual sink spec */
1792 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1793 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1795 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1796 nfrags
, (long unsigned) u
->fragment_size
,
1797 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1799 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
1800 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
/* Latency setup differs: tsched gives a dynamic range, IRQ mode is fixed */
1802 if (u
->use_tsched
) {
1803 u
->watermark_step
= pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC
, &u
->sink
->sample_spec
);
1805 fix_min_sleep_wakeup(u
);
1806 fix_tsched_watermark(u
);
1808 pa_sink_set_latency_range(u
->sink
,
1810 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1812 pa_log_info("Time scheduling watermark is %0.2fms",
1813 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1815 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1819 if (update_sw_params(u
) < 0)
1822 if (setup_mixer(u
, ignore_dB
) < 0)
1825 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1827 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1828 pa_log("Failed to create thread.");
1832 /* Get initial mixer settings */
1833 if (data
.volume_is_set
) {
1834 if (u
->sink
->set_volume
)
1835 u
->sink
->set_volume(u
->sink
);
1837 if (u
->sink
->get_volume
)
1838 u
->sink
->get_volume(u
->sink
);
1841 if (data
.muted_is_set
) {
1842 if (u
->sink
->set_mute
)
1843 u
->sink
->set_mute(u
->sink
);
1845 if (u
->sink
->get_mute
)
1846 u
->sink
->get_mute(u
->sink
);
1849 pa_sink_put(u
->sink
);
/* Success path frees the temporary profile set … */
1852 pa_alsa_profile_set_free(profile_set
);
/* … and the (dropped) fail path frees it too before cleanup/return */
1862 pa_alsa_profile_set_free(profile_set
);
/* Tear down a userdata in reverse construction order: unlink the sink,
 * stop the I/O thread, release queues/buffers, close PCM and mixer,
 * and free owned strings. Most steps are guarded so this also works on
 * a partially-constructed userdata from a failed pa_alsa_sink_new().
 * NOTE(review): some guard lines (e.g. `if (u->sink)`, `if (u->thread)`)
 * appear dropped from this extract — the guards shown are what survive. */
1867 static void userdata_free(struct userdata
*u
) {
1871 pa_sink_unlink(u
->sink
);
/* Synchronously ask the I/O thread to shut down, then reap it */
1874 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1875 pa_thread_free(u
->thread
);
1878 pa_thread_mq_done(&u
->thread_mq
);
1881 pa_sink_unref(u
->sink
);
1883 if (u
->memchunk
.memblock
)
1884 pa_memblock_unref(u
->memchunk
.memblock
);
1886 if (u
->alsa_rtpoll_item
)
1887 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1890 pa_rtpoll_free(u
->rtpoll
);
/* Drop pending samples before closing so close doesn't drain */
1892 if (u
->pcm_handle
) {
1893 snd_pcm_drop(u
->pcm_handle
);
1894 snd_pcm_close(u
->pcm_handle
);
1898 pa_alsa_fdlist_free(u
->mixer_fdl
);
/* Path set and single path are mutually exclusive — free whichever is set */
1900 if (u
->mixer_path_set
)
1901 pa_alsa_path_set_free(u
->mixer_path_set
);
1902 else if (u
->mixer_path
)
1903 pa_alsa_path_free(u
->mixer_path
);
1905 if (u
->mixer_handle
)
1906 snd_mixer_close(u
->mixer_handle
);
1909 pa_smoother_free(u
->smoother
);
/* pa_xfree(NULL) is a no-op, so no guards needed for the strings */
1914 pa_xfree(u
->device_name
);
1915 pa_xfree(u
->control_device
);
1919 void pa_alsa_sink_free(pa_sink
*s
) {
1922 pa_sink_assert_ref(s
);
1923 pa_assert_se(u
= s
->userdata
);