2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/timeval.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
40 #include <pulsecore/core.h>
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/thread-mq.h>
53 #include <pulsecore/rtpoll.h>
54 #include <pulsecore/time-smoother.h>
56 #include <modules/reserve-wrap.h>
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
61 /* #define DEBUG_TIMING */
63 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
69 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
70 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a drop out recheck if things are good now */
71 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
72 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
74 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
75 * will increase the watermark only if we hit a real underrun. */
77 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
78 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty */
80 #define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC) /* 10s -- smoother window size */
81 #define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC) /* 1s -- smoother adjust time */
83 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
84 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
86 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
88 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
89 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
97 pa_thread_mq thread_mq
;
100 snd_pcm_t
*pcm_handle
;
102 pa_alsa_fdlist
*mixer_fdl
;
103 pa_alsa_mixer_pdata
*mixer_pd
;
104 snd_mixer_t
*mixer_handle
;
105 pa_alsa_path_set
*mixer_path_set
;
106 pa_alsa_path
*mixer_path
;
108 pa_cvolume hardware_volume
;
122 watermark_inc_threshold
,
123 watermark_dec_threshold
,
126 pa_usec_t watermark_dec_not_before
;
128 pa_memchunk memchunk
;
130 char *device_name
; /* name of the PCM device */
131 char *control_device
; /* name of the control device */
133 pa_bool_t use_mmap
:1, use_tsched
:1, sync_volume
:1;
135 pa_bool_t first
, after_rewind
;
137 pa_rtpoll_item
*alsa_rtpoll_item
;
139 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
141 pa_smoother
*smoother
;
142 uint64_t write_count
;
143 uint64_t since_start
;
144 pa_usec_t smoother_interval
;
145 pa_usec_t last_smoother_update
;
147 pa_reserve_wrapper
*reserve
;
148 pa_hook_slot
*reserve_slot
;
149 pa_reserve_monitor_wrapper
*monitor
;
150 pa_hook_slot
*monitor_slot
;
153 static void userdata_free(struct userdata
*u
);
155 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
159 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
160 return PA_HOOK_CANCEL
;
165 static void reserve_done(struct userdata
*u
) {
168 if (u
->reserve_slot
) {
169 pa_hook_slot_free(u
->reserve_slot
);
170 u
->reserve_slot
= NULL
;
174 pa_reserve_wrapper_unref(u
->reserve
);
179 static void reserve_update(struct userdata
*u
) {
180 const char *description
;
183 if (!u
->sink
|| !u
->reserve
)
186 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
187 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
190 static int reserve_init(struct userdata
*u
, const char *dname
) {
199 if (pa_in_system_mode())
202 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
205 /* We are resuming, try to lock the device */
206 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
214 pa_assert(!u
->reserve_slot
);
215 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
220 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
226 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
228 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
232 static void monitor_done(struct userdata
*u
) {
235 if (u
->monitor_slot
) {
236 pa_hook_slot_free(u
->monitor_slot
);
237 u
->monitor_slot
= NULL
;
241 pa_reserve_monitor_wrapper_unref(u
->monitor
);
246 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
252 if (pa_in_system_mode())
255 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
258 /* We are resuming, try to lock the device */
259 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
265 pa_assert(!u
->monitor_slot
);
266 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
271 static void fix_min_sleep_wakeup(struct userdata
*u
) {
272 size_t max_use
, max_use_2
;
275 pa_assert(u
->use_tsched
);
277 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
278 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
280 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
281 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
283 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
284 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
287 static void fix_tsched_watermark(struct userdata
*u
) {
290 pa_assert(u
->use_tsched
);
292 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
294 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
295 u
->tsched_watermark
= max_use
- u
->min_sleep
;
297 if (u
->tsched_watermark
< u
->min_wakeup
)
298 u
->tsched_watermark
= u
->min_wakeup
;
301 static void increase_watermark(struct userdata
*u
) {
302 size_t old_watermark
;
303 pa_usec_t old_min_latency
, new_min_latency
;
306 pa_assert(u
->use_tsched
);
308 /* First, just try to increase the watermark */
309 old_watermark
= u
->tsched_watermark
;
310 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_inc_step
);
311 fix_tsched_watermark(u
);
313 if (old_watermark
!= u
->tsched_watermark
) {
314 pa_log_info("Increasing wakeup watermark to %0.2f ms",
315 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
319 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
320 old_min_latency
= u
->sink
->thread_info
.min_latency
;
321 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_INC_STEP_USEC
);
322 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
324 if (old_min_latency
!= new_min_latency
) {
325 pa_log_info("Increasing minimal latency to %0.2f ms",
326 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
328 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
331 /* When we reach this we're officially fucked! */
334 static void decrease_watermark(struct userdata
*u
) {
335 size_t old_watermark
;
339 pa_assert(u
->use_tsched
);
341 now
= pa_rtclock_now();
343 if (u
->watermark_dec_not_before
<= 0)
346 if (u
->watermark_dec_not_before
> now
)
349 old_watermark
= u
->tsched_watermark
;
351 if (u
->tsched_watermark
< u
->watermark_dec_step
)
352 u
->tsched_watermark
= u
->tsched_watermark
/ 2;
354 u
->tsched_watermark
= PA_MAX(u
->tsched_watermark
/ 2, u
->tsched_watermark
- u
->watermark_dec_step
);
356 fix_tsched_watermark(u
);
358 if (old_watermark
!= u
->tsched_watermark
)
359 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
360 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
362 /* We don't change the latency range */
365 u
->watermark_dec_not_before
= now
+ TSCHED_WATERMARK_VERIFY_AFTER_USEC
;
368 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
371 pa_assert(sleep_usec
);
372 pa_assert(process_usec
);
375 pa_assert(u
->use_tsched
);
377 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
379 if (usec
== (pa_usec_t
) -1)
380 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
382 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
387 *sleep_usec
= usec
- wm
;
391 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
392 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
393 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
394 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
398 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
403 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
405 pa_assert(err
!= -EAGAIN
);
408 pa_log_debug("%s: Buffer underrun!", call
);
410 if (err
== -ESTRPIPE
)
411 pa_log_debug("%s: System suspended!", call
);
413 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
414 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
423 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
, pa_bool_t on_timeout
) {
425 pa_bool_t underrun
= FALSE
;
427 /* We use <= instead of < for this check here because an underrun
428 * only happens after the last sample was processed, not already when
429 * it is removed from the buffer. This is particularly important
430 * when block transfer is used. */
432 if (n_bytes
<= u
->hwbuf_size
)
433 left_to_play
= u
->hwbuf_size
- n_bytes
;
436 /* We got a dropout. What a mess! */
444 if (!u
->first
&& !u
->after_rewind
)
445 if (pa_log_ratelimit(PA_LOG_INFO
))
446 pa_log_info("Underrun!");
450 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
451 (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
452 (double) pa_bytes_to_usec(u
->watermark_inc_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
,
453 (double) pa_bytes_to_usec(u
->watermark_dec_threshold
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
457 pa_bool_t reset_not_before
= TRUE
;
459 if (!u
->first
&& !u
->after_rewind
) {
460 if (underrun
|| left_to_play
< u
->watermark_inc_threshold
)
461 increase_watermark(u
);
462 else if (left_to_play
> u
->watermark_dec_threshold
) {
463 reset_not_before
= FALSE
;
465 /* We decrease the watermark only if have actually
466 * been woken up by a timeout. If something else woke
467 * us up it's too easy to fulfill the deadlines... */
470 decrease_watermark(u
);
474 if (reset_not_before
)
475 u
->watermark_dec_not_before
= 0;
481 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
482 pa_bool_t work_done
= FALSE
;
483 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
488 pa_sink_assert_ref(u
->sink
);
491 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
497 pa_bool_t after_avail
= TRUE
;
499 /* First we determine how many samples are missing to fill the
500 * buffer up to 100% */
502 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
504 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
510 n_bytes
= (size_t) n
* u
->frame_size
;
513 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
516 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
521 /* We won't fill up the playback buffer before at least
522 * half the sleep time is over because otherwise we might
523 * ask for more data from the clients than they expect. We
524 * need to guarantee that clients only have to keep around
525 * a single hw buffer length. */
528 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
530 pa_log_debug("Not filling up, because too early.");
535 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
539 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
540 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
541 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
542 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
548 pa_log_debug("Not filling up, because not necessary.");
556 pa_log_debug("Not filling up, because already too many iterations.");
562 n_bytes
-= u
->hwbuf_unused
;
566 pa_log_debug("Filling up");
573 const snd_pcm_channel_area_t
*areas
;
574 snd_pcm_uframes_t offset
, frames
;
575 snd_pcm_sframes_t sframes
;
577 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
578 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
580 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
582 if (!after_avail
&& err
== -EAGAIN
)
585 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
591 /* Make sure that if these memblocks need to be copied they will fit into one slot */
592 if (frames
> pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
)
593 frames
= pa_mempool_block_size_max(u
->core
->mempool
)/u
->frame_size
;
595 if (!after_avail
&& frames
== 0)
598 pa_assert(frames
> 0);
601 /* Check these are multiples of 8 bit */
602 pa_assert((areas
[0].first
& 7) == 0);
603 pa_assert((areas
[0].step
& 7)== 0);
605 /* We assume a single interleaved memory buffer */
606 pa_assert((areas
[0].first
>> 3) == 0);
607 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
609 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
611 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
612 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
615 pa_sink_render_into_full(u
->sink
, &chunk
);
616 pa_memblock_unref_fixed(chunk
.memblock
);
618 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
620 if (!after_avail
&& (int) sframes
== -EAGAIN
)
623 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
631 u
->write_count
+= frames
* u
->frame_size
;
632 u
->since_start
+= frames
* u
->frame_size
;
635 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
638 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
641 n_bytes
-= (size_t) frames
* u
->frame_size
;
646 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
647 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
649 if (*sleep_usec
> process_usec
)
650 *sleep_usec
-= process_usec
;
656 return work_done
? 1 : 0;
659 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
, pa_bool_t on_timeout
) {
660 pa_bool_t work_done
= FALSE
;
661 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
666 pa_sink_assert_ref(u
->sink
);
669 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
675 pa_bool_t after_avail
= TRUE
;
677 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
679 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
685 n_bytes
= (size_t) n
* u
->frame_size
;
686 left_to_play
= check_left_to_play(u
, n_bytes
, on_timeout
);
691 /* We won't fill up the playback buffer before at least
692 * half the sleep time is over because otherwise we might
693 * ask for more data from the clients than they expect. We
694 * need to guarantee that clients only have to keep around
695 * a single hw buffer length. */
698 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
701 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
705 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
706 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
707 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
708 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
718 pa_log_debug("Not filling up, because already too many iterations.");
724 n_bytes
-= u
->hwbuf_unused
;
728 snd_pcm_sframes_t frames
;
731 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
733 if (u
->memchunk
.length
<= 0)
734 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
736 pa_assert(u
->memchunk
.length
> 0);
738 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
740 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
741 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
743 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
744 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
745 pa_memblock_release(u
->memchunk
.memblock
);
747 if (PA_UNLIKELY(frames
< 0)) {
749 if (!after_avail
&& (int) frames
== -EAGAIN
)
752 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
758 if (!after_avail
&& frames
== 0)
761 pa_assert(frames
> 0);
764 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
765 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
767 if (u
->memchunk
.length
<= 0) {
768 pa_memblock_unref(u
->memchunk
.memblock
);
769 pa_memchunk_reset(&u
->memchunk
);
774 u
->write_count
+= frames
* u
->frame_size
;
775 u
->since_start
+= frames
* u
->frame_size
;
777 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
779 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
782 n_bytes
-= (size_t) frames
* u
->frame_size
;
787 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
788 process_usec
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
790 if (*sleep_usec
> process_usec
)
791 *sleep_usec
-= process_usec
;
797 return work_done
? 1 : 0;
800 static void update_smoother(struct userdata
*u
) {
801 snd_pcm_sframes_t delay
= 0;
804 pa_usec_t now1
= 0, now2
;
805 snd_pcm_status_t
*status
;
807 snd_pcm_status_alloca(&status
);
810 pa_assert(u
->pcm_handle
);
812 /* Let's update the time smoother */
814 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
, FALSE
)) < 0)) {
815 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
819 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
820 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
822 snd_htimestamp_t htstamp
= { 0, 0 };
823 snd_pcm_status_get_htstamp(status
, &htstamp
);
824 now1
= pa_timespec_load(&htstamp
);
827 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
829 now1
= pa_rtclock_now();
831 /* check if the time since the last update is bigger than the interval */
832 if (u
->last_smoother_update
> 0)
833 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
836 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
838 if (PA_UNLIKELY(position
< 0))
841 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
843 pa_smoother_put(u
->smoother
, now1
, now2
);
845 u
->last_smoother_update
= now1
;
846 /* exponentially increase the update interval up to the MAX limit */
847 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
850 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
853 pa_usec_t now1
, now2
;
857 now1
= pa_rtclock_now();
858 now2
= pa_smoother_get(u
->smoother
, now1
);
860 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
862 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
864 if (u
->memchunk
.memblock
)
865 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
870 static int build_pollfd(struct userdata
*u
) {
872 pa_assert(u
->pcm_handle
);
874 if (u
->alsa_rtpoll_item
)
875 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
877 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
883 /* Called from IO context */
884 static int suspend(struct userdata
*u
) {
886 pa_assert(u
->pcm_handle
);
888 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
890 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
891 * take awfully long with our long buffer sizes today. */
892 snd_pcm_close(u
->pcm_handle
);
893 u
->pcm_handle
= NULL
;
895 if (u
->alsa_rtpoll_item
) {
896 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
897 u
->alsa_rtpoll_item
= NULL
;
900 /* We reset max_rewind/max_request here to make sure that while we
901 * are suspended the old max_request/max_rewind values set before
902 * the suspend can influence the per-stream buffer of newly
903 * created streams, without their requirements having any
904 * influence on them. */
905 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
906 pa_sink_set_max_request_within_thread(u
->sink
, 0);
908 pa_log_info("Device suspended...");
913 /* Called from IO context */
914 static int update_sw_params(struct userdata
*u
) {
915 snd_pcm_uframes_t avail_min
;
920 /* Use the full buffer if no one asked us for anything specific */
926 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
929 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
931 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
933 /* We need at least one sample in our buffer */
935 if (PA_UNLIKELY(b
< u
->frame_size
))
938 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
941 fix_min_sleep_wakeup(u
);
942 fix_tsched_watermark(u
);
945 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
947 /* We need at least one frame in the used part of the buffer */
948 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
951 pa_usec_t sleep_usec
, process_usec
;
953 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
954 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
957 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
959 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
, !u
->use_tsched
)) < 0) {
960 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
964 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
965 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
966 pa_sink_set_max_rewind_within_thread(u
->sink
, u
->hwbuf_size
);
968 pa_log_info("Disabling rewind_within_thread for device %s", u
->device_name
);
969 pa_sink_set_max_rewind_within_thread(u
->sink
, 0);
975 /* Called from IO context */
976 static int unsuspend(struct userdata
*u
) {
980 snd_pcm_uframes_t period_size
, buffer_size
;
983 pa_assert(!u
->pcm_handle
);
985 pa_log_info("Trying resume...");
987 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
989 SND_PCM_NO_AUTO_RESAMPLE
|
990 SND_PCM_NO_AUTO_CHANNELS
|
991 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
992 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
996 ss
= u
->sink
->sample_spec
;
997 period_size
= u
->fragment_size
/ u
->frame_size
;
998 buffer_size
= u
->hwbuf_size
/ u
->frame_size
;
1002 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &period_size
, &buffer_size
, 0, &b
, &d
, TRUE
)) < 0) {
1003 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
1007 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
1008 pa_log_warn("Resume failed, couldn't get original access mode.");
1012 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
1013 pa_log_warn("Resume failed, couldn't restore original sample settings.");
1017 if (period_size
*u
->frame_size
!= u
->fragment_size
||
1018 buffer_size
*u
->frame_size
!= u
->hwbuf_size
) {
1019 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
1020 (unsigned long) u
->hwbuf_size
, (unsigned long) u
->fragment_size
,
1021 (unsigned long) (buffer_size
*u
->frame_size
), (unsigned long) (period_size
*u
->frame_size
));
1025 if (update_sw_params(u
) < 0)
1028 if (build_pollfd(u
) < 0)
1032 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
1033 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1034 u
->last_smoother_update
= 0;
1039 pa_log_info("Resumed successfully...");
1044 if (u
->pcm_handle
) {
1045 snd_pcm_close(u
->pcm_handle
);
1046 u
->pcm_handle
= NULL
;
1052 /* Called from IO context */
1053 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
1054 struct userdata
*u
= PA_SINK(o
)->userdata
;
1058 case PA_SINK_MESSAGE_FINISH_MOVE
:
1059 case PA_SINK_MESSAGE_ADD_INPUT
: {
1060 pa_sink_input
*i
= PA_SINK_INPUT(data
);
1063 if (PA_LIKELY(!pa_sink_input_is_passthrough(i
)))
1066 u
->old_rate
= u
->sink
->sample_spec
.rate
;
1068 /* Passthrough format, see if we need to reset sink sample rate */
1069 if (u
->sink
->sample_spec
.rate
== i
->thread_info
.sample_spec
.rate
)
1073 if ((r
= suspend(u
)) < 0)
1076 u
->sink
->sample_spec
.rate
= i
->thread_info
.sample_spec
.rate
;
1078 if ((r
= unsuspend(u
)) < 0)
1084 case PA_SINK_MESSAGE_START_MOVE
:
1085 case PA_SINK_MESSAGE_REMOVE_INPUT
: {
1086 pa_sink_input
*i
= PA_SINK_INPUT(data
);
1089 if (PA_LIKELY(!pa_sink_input_is_passthrough(i
)))
1092 /* Passthrough format, see if we need to reset sink sample rate */
1093 if (u
->sink
->sample_spec
.rate
== u
->old_rate
)
1097 if ((r
= suspend(u
)) < 0)
1100 u
->sink
->sample_spec
.rate
= u
->old_rate
;
1102 if ((r
= unsuspend(u
)) < 0)
1108 case PA_SINK_MESSAGE_GET_LATENCY
: {
1112 r
= sink_get_latency(u
);
1114 *((pa_usec_t
*) data
) = r
;
1119 case PA_SINK_MESSAGE_SET_STATE
:
1121 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
1123 case PA_SINK_SUSPENDED
: {
1126 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
1128 if ((r
= suspend(u
)) < 0)
1135 case PA_SINK_RUNNING
: {
1138 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
1139 if (build_pollfd(u
) < 0)
1143 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1144 if ((r
= unsuspend(u
)) < 0)
1151 case PA_SINK_UNLINKED
:
1153 case PA_SINK_INVALID_STATE
:
1160 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
1163 /* Called from main context */
1164 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1165 pa_sink_state_t old_state
;
1168 pa_sink_assert_ref(s
);
1169 pa_assert_se(u
= s
->userdata
);
1171 old_state
= pa_sink_get_state(u
->sink
);
1173 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1175 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1176 if (reserve_init(u
, u
->device_name
) < 0)
1177 return -PA_ERR_BUSY
;
1182 static int ctl_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1183 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1186 pa_assert(u
->mixer_handle
);
1188 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1191 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1194 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1195 pa_sink_get_volume(u
->sink
, TRUE
);
1196 pa_sink_get_mute(u
->sink
, TRUE
);
1202 static int io_mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1203 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1206 pa_assert(u
->mixer_handle
);
1208 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1211 if (u
->sink
->suspend_cause
& PA_SUSPEND_SESSION
)
1214 if (mask
& SND_CTL_EVENT_MASK_VALUE
)
1215 pa_sink_update_volume_and_mute(u
->sink
);
1220 static void sink_get_volume_cb(pa_sink
*s
) {
1221 struct userdata
*u
= s
->userdata
;
1223 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
1226 pa_assert(u
->mixer_path
);
1227 pa_assert(u
->mixer_handle
);
1229 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1232 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1233 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1235 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1237 if (u
->mixer_path
->has_dB
) {
1238 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1240 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &r
));
1243 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1246 s
->real_volume
= u
->hardware_volume
= r
;
1248 /* Hmm, so the hardware volume changed, let's reset our software volume */
1249 if (u
->mixer_path
->has_dB
)
1250 pa_sink_set_soft_volume(s
, NULL
);
/*
 * pa_sink set_volume callback: pushes s->real_volume to the ALSA mixer.
 * Converts from PA's 0dB-referenced scale to the hardware scale by dividing
 * by base_volume, writes it via pa_alsa_path_set_volume(), then (when the
 * path has a dB scale) derives a residual software volume so the effective
 * volume matches what the user requested exactly.
 * NOTE(review): this chunk is an extraction with interior lines missing
 * (original numbering skips), so some statements/braces are not visible here.
 */
1253 static void sink_set_volume_cb(pa_sink
*s
) {
1254 struct userdata
*u
= s
->userdata
;
1256 char vol_str_pcnt
[PA_CVOLUME_SNPRINT_MAX
];
/* With PA_SINK_SYNC_VOLUME the actual HW write is deferred to the IO
 * thread (sink_write_volume_cb); here we only compute values. */
1257 pa_bool_t sync_volume
= !!(s
->flags
& PA_SINK_SYNC_VOLUME
);
1260 pa_assert(u
->mixer_path
);
1261 pa_assert(u
->mixer_handle
);
1263 /* Shift up by the base volume */
1264 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
/* Last two args: write-to-hw and round direction depend on whether the
 * write is synchronous or deferred. */
1266 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
, sync_volume
, !sync_volume
) < 0)
1269 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1270 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
/* Remember what the hardware is now set to. */
1272 u
->hardware_volume
= r
;
1274 if (u
->mixer_path
->has_dB
) {
1275 pa_cvolume new_soft_volume
;
1276 pa_bool_t accurate_enough
;
1277 char vol_str_db
[PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1279 /* Match exactly what the user requested by software */
1280 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1282 /* If the adjustment to do in software is only minimal we
1283 * can skip it. That saves us CPU at the expense of a bit of
/* accurate_enough := soft correction stays within VOLUME_ACCURACY of
 * PA_VOLUME_NORM on every channel. */
1286 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1287 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1289 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &s
->real_volume
));
1290 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &s
->real_volume
));
1291 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &u
->hardware_volume
));
1292 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &u
->hardware_volume
));
1293 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1294 pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &new_soft_volume
),
1295 pa_yes_no(accurate_enough
));
1296 pa_log_debug(" in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db
, sizeof(vol_str_db
), &new_soft_volume
));
/* Apply the residual correction in software only when it matters. */
1298 if (!accurate_enough
)
1299 s
->soft_volume
= new_soft_volume
;
1302 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt
, sizeof(vol_str_pcnt
), &r
));
1304 /* We can't match exactly what the user requested, hence let's
1305 * at least tell the user about it */
/*
 * pa_sink write_volume callback (IO-thread side of deferred/synchronous
 * volume): writes thread_info.current_hw_volume to the ALSA mixer and logs
 * a debug diff if the value the hardware accepted is not within
 * VOLUME_ACCURACY of what was requested. Only used with PA_SINK_SYNC_VOLUME
 * (asserted below).
 * NOTE(review): interior lines are missing from this extraction.
 */
1311 static void sink_write_volume_cb(pa_sink
*s
) {
1312 struct userdata
*u
= s
->userdata
;
/* Work on a copy; current_hw_volume itself stays the requested value. */
1313 pa_cvolume hw_vol
= s
->thread_info
.current_hw_volume
;
1316 pa_assert(u
->mixer_path
);
1317 pa_assert(u
->mixer_handle
);
1318 pa_assert(s
->flags
& PA_SINK_SYNC_VOLUME
);
1320 /* Shift up by the base volume */
1321 pa_sw_cvolume_divide_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
1323 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &hw_vol
, TRUE
, TRUE
) < 0)
1324 pa_log_error("Writing HW volume failed");
1327 pa_bool_t accurate_enough
;
1329 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1330 pa_sw_cvolume_multiply_scalar(&hw_vol
, &hw_vol
, s
->base_volume
);
/* Ratio of written-back volume to the requested one, per channel. */
1332 pa_sw_cvolume_divide(&tmp_vol
, &hw_vol
, &s
->thread_info
.current_hw_volume
);
1334 (pa_cvolume_min(&tmp_vol
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1335 (pa_cvolume_max(&tmp_vol
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
/* Mismatch beyond tolerance: report requested vs. actual, in % and dB. */
1337 if (!accurate_enough
) {
1339 char db
[2][PA_SW_CVOLUME_SNPRINT_DB_MAX
];
1340 char pcnt
[2][PA_CVOLUME_SNPRINT_MAX
];
1343 pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1344 pa_cvolume_snprint(vol
.pcnt
[0], sizeof(vol
.pcnt
[0]), &s
->thread_info
.current_hw_volume
),
1345 pa_cvolume_snprint(vol
.pcnt
[1], sizeof(vol
.pcnt
[1]), &hw_vol
));
1346 pa_log_debug(" in dB: %s (request) != %s",
1347 pa_sw_cvolume_snprint_dB(vol
.db
[0], sizeof(vol
.db
[0]), &s
->thread_info
.current_hw_volume
),
1348 pa_sw_cvolume_snprint_dB(vol
.db
[1], sizeof(vol
.db
[1]), &hw_vol
));
/*
 * pa_sink get_mute callback: queries the current mute state from the ALSA
 * mixer path into a local flag. (The assignment back to s->muted is on a
 * line not visible in this extraction.)
 */
1353 static void sink_get_mute_cb(pa_sink
*s
) {
1354 struct userdata
*u
= s
->userdata
;
1358 pa_assert(u
->mixer_path
);
1359 pa_assert(u
->mixer_handle
);
/* On failure the error-return path is on a missing line; b stays unset. */
1361 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
/*
 * pa_sink set_mute callback: pushes s->muted into the ALSA mixer via the
 * currently selected mixer path.
 */
1367 static void sink_set_mute_cb(pa_sink
*s
) {
1368 struct userdata
*u
= s
->userdata
;
1371 pa_assert(u
->mixer_path
);
1372 pa_assert(u
->mixer_handle
);
1374 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
/*
 * Wires the sink's volume/mute callbacks according to the capabilities of
 * the active mixer path (u->mixer_path): hardware volume (with optional
 * synchronous/deferred write when has_dB && sync_volume), dB scale vs. raw
 * integer steps, and hardware vs. software mute. Called from setup_mixer()
 * and on port switches (sink_set_port_cb).
 */
1377 static void mixer_volume_init(struct userdata
*u
) {
/* No HW volume on this path: clear all volume callbacks -> soft volume. */
1380 if (!u
->mixer_path
->has_volume
) {
1381 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1382 pa_sink_set_get_volume_callback(u
->sink
, NULL
);
1383 pa_sink_set_set_volume_callback(u
->sink
, NULL
);
1385 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1387 pa_sink_set_get_volume_callback(u
->sink
, sink_get_volume_cb
);
1388 pa_sink_set_set_volume_callback(u
->sink
, sink_set_volume_cb
);
/* Deferred (IO-thread) HW writes only make sense with a dB scale. */
1390 if (u
->mixer_path
->has_dB
&& u
->sync_volume
) {
1391 pa_sink_set_write_volume_callback(u
->sink
, sink_write_volume_cb
);
1392 pa_log_info("Successfully enabled synchronous volume.");
1394 pa_sink_set_write_volume_callback(u
->sink
, NULL
);
1396 if (u
->mixer_path
->has_dB
) {
1397 pa_sink_enable_decibel_volume(u
->sink
, TRUE
);
1398 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
/* base_volume maps the path's max_dB onto PA's 0dB reference point. */
1400 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1401 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1403 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
/* No dB info: expose the raw integer control range as volume steps. */
1405 pa_sink_enable_decibel_volume(u
->sink
, FALSE
);
1406 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1408 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1409 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1412 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
/* Same pattern for mute: HW mute callbacks only if the path supports it. */
1415 if (!u
->mixer_path
->has_mute
) {
1416 pa_sink_set_get_mute_callback(u
->sink
, NULL
);
1417 pa_sink_set_set_mute_callback(u
->sink
, NULL
);
1418 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1420 pa_sink_set_get_mute_callback(u
->sink
, sink_get_mute_cb
);
1421 pa_sink_set_set_mute_callback(u
->sink
, sink_set_mute_cb
);
1422 pa_log_info("Using hardware mute control.");
/*
 * pa_sink set_port callback: switches the active mixer path to the one
 * stored in the port's pa_alsa_port_data, selects it on the mixer, and
 * re-initializes the volume/mute callbacks for the new path's capabilities.
 */
1426 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1427 struct userdata
*u
= s
->userdata
;
1428 pa_alsa_port_data
*data
;
1432 pa_assert(u
->mixer_handle
);
1434 data
= PA_DEVICE_PORT_DATA(p
);
/* Every port created from a path set must carry a path. */
1436 pa_assert_se(u
->mixer_path
= data
->path
);
1437 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
/* Callbacks/base volume depend on the newly selected path. */
1439 mixer_volume_init(u
);
1442 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
/*
 * pa_sink update_requested_latency callback (tsched mode only): recomputes
 * the software parameters (hwbuf_unused) for the new latency and requests a
 * full rewind if the usable buffer shrank, so rewinds stay relative to the
 * new maximum fill level.
 */
1452 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1453 struct userdata
*u
= s
->userdata
;
1456 pa_assert(u
->use_tsched
); /* only when timer scheduling is used
1457 * we can dynamically adjust the
/* Snapshot the old unused-buffer size before recomputation. */
1463 before
= u
->hwbuf_unused
;
1464 update_sw_params(u
);
1466 /* Let's check whether we now use only a smaller part of the
1467 buffer then before. If so, we need to make sure that subsequent
1468 rewinds are relative to the new maximum fill level and not to the
1469 current fill level. Thus, let's do a full rewind once, to clear
1472 if (u
->hwbuf_unused
> before
) {
1473 pa_log_debug("Requesting rewind due to latency change.");
1474 pa_sink_request_rewind(s
, (size_t) -1);
/*
 * Executes a rewind previously requested via thread_info.rewind_nbytes:
 * clamps the amount so it never touches data within rewind_safeguard bytes
 * of the DMA read pointer, calls snd_pcm_rewind(), adjusts write_count,
 * and notifies the core via pa_sink_process_rewind() (with 0 if nothing
 * could be rewound). Returns <0 only on an unrecoverable PCM error
 * (visible via the try_recover() path).
 */
1478 static int process_rewind(struct userdata
*u
) {
1479 snd_pcm_sframes_t unused
;
1480 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1483 /* Figure out how much we shall rewind and reset the counter */
1484 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1486 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
/* How many frames of the HW buffer are currently NOT queued for playback. */
1488 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1489 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1493 unused_nbytes
= (size_t) unused
* u
->frame_size
;
1495 /* make sure rewind doesn't go too far, can cause issues with DMAs */
1496 unused_nbytes
+= u
->rewind_safeguard
;
/* limit_nbytes = rewindable portion of the buffer (queued minus safeguard). */
1498 if (u
->hwbuf_size
> unused_nbytes
)
1499 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1503 if (rewind_nbytes
> limit_nbytes
)
1504 rewind_nbytes
= limit_nbytes
;
1506 if (rewind_nbytes
> 0) {
1507 snd_pcm_sframes_t in_frames
, out_frames
;
1509 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1511 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1512 pa_log_debug("before: %lu", (unsigned long) in_frames
);
/* ALSA may rewind fewer frames than asked; out_frames is authoritative. */
1513 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1514 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1515 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1520 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1522 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1524 if (rewind_nbytes
<= 0)
1525 pa_log_info("Tried rewind, but was apparently not possible.");
/* Keep the running byte counter consistent with what was taken back. */
1527 u
->write_count
-= rewind_nbytes
;
1528 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1529 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1531 u
->after_rewind
= TRUE
;
1535 pa_log_debug("Mhmm, actually there is nothing to rewind.");
/* Still inform the core so the rewind request gets cleared. */
1537 pa_sink_process_rewind(u
->sink
, 0);
/*
 * The sink's realtime IO thread. Main loop: process pending rewinds, render
 * audio into the ALSA buffer via mmap_write()/unix_write(), compute the next
 * wakeup (tsched mode: min of sound-card-clock estimate and its translation
 * to the system clock via the smoother), run the rtpoll, then translate
 * poll revents back through snd_pcm_poll_descriptors_revents() and recover
 * from error conditions. On abnormal exit it asks the main thread to unload
 * the module and drains messages until PA_MESSAGE_SHUTDOWN.
 * NOTE(review): interior lines (loop header, goto labels, some branches)
 * are missing from this extraction.
 */
1541 static void thread_func(void *userdata
) {
1542 struct userdata
*u
= userdata
;
1543 unsigned short revents
= 0;
1547 pa_log_debug("Thread starting up");
1549 if (u
->core
->realtime_scheduling
)
1550 pa_make_realtime(u
->core
->realtime_priority
);
1552 pa_thread_mq_install(&u
->thread_mq
);
1556 pa_usec_t rtpoll_sleep
= 0;
1559 pa_log_debug("Loop");
1562 /* Render some data and write it to the dsp */
1563 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1565 pa_usec_t sleep_usec
= 0;
1566 pa_bool_t on_timeout
= pa_rtpoll_timer_elapsed(u
->rtpoll
);
/* Rewinds must be handled before writing new data. */
1568 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1569 if (process_rewind(u
) < 0)
1573 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1575 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
, on_timeout
);
1580 /* pa_log_debug("work_done = %i", work_done); */
1585 pa_log_info("Starting playback.");
1586 snd_pcm_start(u
->pcm_handle
);
1588 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1596 if (u
->use_tsched
) {
1599 if (u
->since_start
<= u
->hwbuf_size
) {
1601 /* USB devices on ALSA seem to hit a buffer
1602 * underrun during the first iterations much
1603 * quicker then we calculate here, probably due to
1604 * the transport latency. To accommodate for that
1605 * we artificially decrease the sleep time until
1606 * we have filled the buffer at least once
1609 if (pa_log_ratelimit(PA_LOG_DEBUG
))
1610 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1614 /* OK, the playback buffer is now full, let's
1615 * calculate when to wake up next */
1616 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1618 /* Convert from the sound card time domain to the
1619 * system time domain */
1620 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1622 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1624 /* We don't trust the conversion, so we wake up whatever comes first */
1625 rtpoll_sleep
= PA_MIN(sleep_usec
, cusec
);
1628 u
->after_rewind
= FALSE
;
/* Deferred volume: a pending HW volume change may need an earlier wakeup. */
1632 if (u
->sink
->flags
& PA_SINK_SYNC_VOLUME
) {
1633 pa_usec_t volume_sleep
;
1634 pa_sink_volume_change_apply(u
->sink
, &volume_sleep
);
1635 if (volume_sleep
> 0)
1636 rtpoll_sleep
= PA_MIN(volume_sleep
, rtpoll_sleep
);
1639 if (rtpoll_sleep
> 0)
1640 pa_rtpoll_set_timer_relative(u
->rtpoll
, rtpoll_sleep
);
1642 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1644 /* Hmm, nothing to do. Let's sleep */
1645 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
/* Apply any volume change that came due while we slept. */
1648 if (u
->sink
->flags
& PA_SINK_SYNC_VOLUME
)
1649 pa_sink_volume_change_apply(u
->sink
, NULL
);
1654 /* Tell ALSA about this and process its response */
1655 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1656 struct pollfd
*pollfd
;
1660 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
/* Raw poll() revents must be translated through ALSA before use. */
1662 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1663 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
/* Anything besides POLLOUT (e.g. POLLERR on xrun) triggers recovery. */
1667 if (revents
& ~POLLOUT
) {
1668 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1673 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit(PA_LOG_DEBUG
))
1674 pa_log_debug("Wakeup from ALSA!");
1681 /* If this was no regular exit from the loop we have to continue
1682 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1683 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1684 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1687 pa_log_debug("Thread shutting down");
/*
 * Chooses the new sink's registered name. Priority: explicit "sink_name="
 * module argument (name registration must not fail), then a name derived
 * from "name="/device id, otherwise an auto-generated
 * "alsa_output.<dev>[.<mapping>]" name (registration may be made unique).
 */
1690 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
1696 pa_assert(device_name
);
/* Explicit sink_name= wins and must register exactly as given. */
1698 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1699 pa_sink_new_data_set_name(data
, n
);
1700 data
->namereg_fail
= TRUE
;
1704 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1705 data
->namereg_fail
= TRUE
;
/* Fall back to the device id (or ALSA device string) as the stem. */
1707 n
= device_id
? device_id
: device_name
;
1708 data
->namereg_fail
= FALSE
;
1712 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1714 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1716 pa_sink_new_data_set_name(data
, t
);
/*
 * Opens the ALSA mixer for u->pcm_handle and probes mixer paths:
 * - explicit "control=<element>": synthesize and probe a single path;
 * - a profile mapping: build and probe a whole path set.
 * On any probe failure everything acquired here is torn down again (the
 * cleanup section at the bottom), leaving the sink with software volume.
 */
1720 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
/* Nothing to base a mixer path on: bail out early. */
1722 if (!mapping
&& !element
)
1725 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1726 pa_log_info("Failed to find a working mixer device.");
/* Single, user-specified mixer element. */
1732 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1735 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1738 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1739 pa_alsa_path_dump(u
->mixer_path
);
/* Mapping-provided set of candidate paths. */
1742 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1745 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
/* Error cleanup: release whichever of path-set/path/mixer we acquired. */
1752 if (u
->mixer_path_set
) {
1753 pa_alsa_path_set_free(u
->mixer_path_set
);
1754 u
->mixer_path_set
= NULL
;
1755 } else if (u
->mixer_path
) {
1756 pa_alsa_path_free(u
->mixer_path
);
1757 u
->mixer_path
= NULL
;
1760 if (u
->mixer_handle
) {
1761 snd_mixer_close(u
->mixer_handle
);
1762 u
->mixer_handle
= NULL
;
/*
 * Activates the mixer found by find_mixer(): selects the path belonging to
 * the active port (or the single/first probed path), initializes the
 * volume/mute callbacks via mixer_volume_init(), and registers mixer-event
 * callbacks — on the IO thread's rtpoll when PA_SINK_SYNC_VOLUME is set
 * (io_mixer_callback), otherwise on the main loop via an fdlist
 * (ctl_mixer_callback). Returns 0 when there is no mixer to set up;
 * negative on monitoring-setup failure (per the error logs below).
 */
1767 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1768 pa_bool_t need_mixer_callback
= FALSE
;
1772 if (!u
->mixer_handle
)
1775 if (u
->sink
->active_port
) {
1776 pa_alsa_port_data
*data
;
1778 /* We have a list of supported paths, so let's activate the
1779 * one that has been chosen as active */
1781 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1782 u
->mixer_path
= data
->path
;
1784 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1787 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
/* No ports: fall back to the first path in the probed set, if any. */
1791 if (!u
->mixer_path
&& u
->mixer_path_set
)
1792 u
->mixer_path
= u
->mixer_path_set
->paths
;
1794 if (u
->mixer_path
) {
1795 /* Hmm, we have only a single path, then let's activate it */
1797 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1799 if (u
->mixer_path
->settings
)
1800 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1805 mixer_volume_init(u
);
1807 /* Will we need to register callbacks? */
1808 if (u
->mixer_path_set
&& u
->mixer_path_set
->paths
) {
1811 PA_LLIST_FOREACH(p
, u
->mixer_path_set
->paths
) {
1812 if (p
->has_volume
|| p
->has_mute
)
1813 need_mixer_callback
= TRUE
;
1816 else if (u
->mixer_path
)
1817 need_mixer_callback
= u
->mixer_path
->has_volume
|| u
->mixer_path
->has_mute
;
1819 if (need_mixer_callback
) {
1820 int (*mixer_callback
)(snd_mixer_elem_t
*, unsigned int);
/* Deferred volume: mixer events are handled on the IO thread's rtpoll. */
1821 if (u
->sink
->flags
& PA_SINK_SYNC_VOLUME
) {
1822 u
->mixer_pd
= pa_alsa_mixer_pdata_new();
1823 mixer_callback
= io_mixer_callback
;
1825 if (pa_alsa_set_mixer_rtpoll(u
->mixer_pd
, u
->mixer_handle
, u
->rtpoll
) < 0) {
1826 pa_log("Failed to initialize file descriptor monitoring");
/* Otherwise mixer events are dispatched from the main loop. */
1830 u
->mixer_fdl
= pa_alsa_fdlist_new();
1831 mixer_callback
= ctl_mixer_callback
;
1833 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1834 pa_log("Failed to initialize file descriptor monitoring");
1839 if (u
->mixer_path_set
)
1840 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1842 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
/*
 * Constructor for an ALSA playback sink. Steps, in order:
 *  1. parse sample spec / channel map and all buffering + behavior module
 *     arguments (fragments, tsched sizes, mmap, tsched, ignore_dB,
 *     rewind_safeguard, sync_volume);
 *  2. allocate the userdata, rtpoll, thread message queue and smoother,
 *     and reserve the device;
 *  3. open the PCM (by mapping, by device id with auto-profile, or by
 *     device string), downgrading mmap/tsched if the device can't do them;
 *  4. probe the mixer (find_mixer), build pa_sink_new_data (name,
 *     proplist, ports) and create the pa_sink;
 *  5. fill in callbacks and buffer metrics, configure latency (dynamic for
 *     tsched, fixed otherwise), set up SW params and the mixer, start the
 *     IO thread, apply/read the initial volume+mute, and pa_sink_put().
 * Returns the new sink, or (via the error path not fully visible here)
 * NULL on failure.
 * NOTE(review): this extraction is missing interior lines (labels, returns,
 * some argument lines of multi-line calls); code left byte-identical.
 */
1848 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1850 struct userdata
*u
= NULL
;
1851 const char *dev_id
= NULL
;
1852 pa_sample_spec ss
, requested_ss
;
1854 uint32_t nfrags
, frag_size
, buffer_size
, tsched_size
, tsched_watermark
, rewind_safeguard
;
1855 snd_pcm_uframes_t period_frames
, buffer_frames
, tsched_frames
;
1857 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
, namereg_fail
= FALSE
, sync_volume
= FALSE
;
1858 pa_sink_new_data data
;
1859 pa_alsa_profile_set
*profile_set
= NULL
;
/* --- 1. argument parsing --------------------------------------------- */
1864 ss
= m
->core
->default_sample_spec
;
1865 map
= m
->core
->default_channel_map
;
1866 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1867 pa_log("Failed to parse sample specification and channel map");
1872 frame_size
= pa_frame_size(&ss
);
1874 nfrags
= m
->core
->default_n_fragments
;
1875 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1877 frag_size
= (uint32_t) frame_size
;
1878 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1879 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1881 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1882 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1883 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1884 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1885 pa_log("Failed to parse buffer metrics");
1889 buffer_size
= nfrags
* frag_size
;
1891 period_frames
= frag_size
/frame_size
;
1892 buffer_frames
= buffer_size
/frame_size
;
1893 tsched_frames
= tsched_size
/frame_size
;
1895 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1896 pa_log("Failed to parse mmap argument.");
1900 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1901 pa_log("Failed to parse tsched argument.");
1905 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1906 pa_log("Failed to parse ignore_dB argument.");
1910 rewind_safeguard
= PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES
, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC
, &ss
));
1911 if (pa_modargs_get_value_u32(ma
, "rewind_safeguard", &rewind_safeguard
) < 0) {
1912 pa_log("Failed to parse rewind_safeguard argument");
1916 sync_volume
= m
->core
->sync_volume
;
1917 if (pa_modargs_get_value_boolean(ma
, "sync_volume", &sync_volume
) < 0) {
1918 pa_log("Failed to parse sync_volume argument.");
/* tsched may be globally vetoed (e.g. broken timers). */
1922 use_tsched
= pa_alsa_may_tsched(use_tsched
);
/* --- 2. userdata / infrastructure allocation ------------------------- */
1924 u
= pa_xnew0(struct userdata
, 1);
1927 u
->use_mmap
= use_mmap
;
1928 u
->use_tsched
= use_tsched
;
1929 u
->sync_volume
= sync_volume
;
1931 u
->rewind_safeguard
= rewind_safeguard
;
1932 u
->rtpoll
= pa_rtpoll_new();
1933 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1935 u
->smoother
= pa_smoother_new(
1936 SMOOTHER_ADJUST_USEC
,
1937 SMOOTHER_WINDOW_USEC
,
1943 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
1945 dev_id
= pa_modargs_get_value(
1947 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1949 if (reserve_init(u
, dev_id
) < 0)
1952 if (reserve_monitor_init(u
, dev_id
) < 0)
/* --- 3. open the PCM (mapping / device_id auto / device string) ------ */
1960 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1961 pa_log("device_id= not set");
1965 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1969 SND_PCM_STREAM_PLAYBACK
,
1970 &period_frames
, &buffer_frames
, tsched_frames
,
1974 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1976 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1979 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1983 SND_PCM_STREAM_PLAYBACK
,
1984 &period_frames
, &buffer_frames
, tsched_frames
,
1985 &b
, &d
, profile_set
, &mapping
)))
1990 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1991 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1994 SND_PCM_STREAM_PLAYBACK
,
1995 &period_frames
, &buffer_frames
, tsched_frames
,
2000 pa_assert(u
->device_name
);
2001 pa_log_info("Successfully opened device %s.", u
->device_name
);
2003 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
2004 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
2009 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
/* b/d were filled by the open helpers: mmap / tsched capability flags. */
2011 if (use_mmap
&& !b
) {
2012 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2013 u
->use_mmap
= use_mmap
= FALSE
;
2016 if (use_tsched
&& (!b
|| !d
)) {
2017 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2018 u
->use_tsched
= use_tsched
= FALSE
;
2022 pa_log_info("Successfully enabled mmap() mode.");
2025 pa_log_info("Successfully enabled timer-based scheduling mode.");
2027 /* ALSA might tweak the sample spec, so recalculate the frame size */
2028 frame_size
= pa_frame_size(&ss
);
/* --- 4. mixer probe + sink object creation --------------------------- */
2030 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
2032 pa_sink_new_data_init(&data
);
2033 data
.driver
= driver
;
2036 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
2038 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2039 * variable instead of using &data.namereg_fail directly, because
2040 * data.namereg_fail is a bitfield and taking the address of a bitfield
2041 * variable is impossible. */
2042 namereg_fail
= data
.namereg_fail
;
2043 if (pa_modargs_get_value_boolean(ma
, "namereg_fail", &namereg_fail
) < 0) {
2044 pa_log("Failed to parse boolean argument namereg_fail.");
2045 pa_sink_new_data_done(&data
);
2048 data
.namereg_fail
= namereg_fail
;
2050 pa_sink_new_data_set_sample_spec(&data
, &ss
);
2051 pa_sink_new_data_set_channel_map(&data
, &map
);
2053 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
2054 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
2055 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (buffer_frames
* frame_size
));
2056 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
2057 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
2060 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
2061 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
2064 pa_alsa_init_description(data
.proplist
);
2066 if (u
->control_device
)
2067 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
2069 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
2070 pa_log("Invalid properties");
2071 pa_sink_new_data_done(&data
);
/* Ports come from the probed mixer path set, if any. */
2075 if (u
->mixer_path_set
)
2076 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
2078 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
2079 pa_sink_new_data_done(&data
);
2082 pa_log("Failed to create sink object");
2086 if (pa_modargs_get_value_u32(ma
, "sync_volume_safety_margin",
2087 &u
->sink
->thread_info
.volume_change_safety_margin
) < 0) {
2088 pa_log("Failed to parse sync_volume_safety_margin parameter");
2092 if (pa_modargs_get_value_s32(ma
, "sync_volume_extra_delay",
2093 &u
->sink
->thread_info
.volume_change_extra_delay
) < 0) {
2094 pa_log("Failed to parse sync_volume_extra_delay parameter");
/* --- 5. callbacks, metrics, latency, mixer, thread, initial state ---- */
2098 u
->sink
->parent
.process_msg
= sink_process_msg
;
2100 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
2101 u
->sink
->set_state
= sink_set_state_cb
;
2102 u
->sink
->set_port
= sink_set_port_cb
;
2103 u
->sink
->userdata
= u
;
2105 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
2106 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
2108 u
->frame_size
= frame_size
;
2109 u
->fragment_size
= frag_size
= (size_t) (period_frames
* frame_size
);
2110 u
->hwbuf_size
= buffer_size
= (size_t) (buffer_frames
* frame_size
);
2111 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
2113 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2114 (double) u
->hwbuf_size
/ (double) u
->fragment_size
,
2115 (long unsigned) u
->fragment_size
,
2116 (double) pa_bytes_to_usec(u
->fragment_size
, &ss
) / PA_USEC_PER_MSEC
,
2117 (long unsigned) u
->hwbuf_size
,
2118 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
2120 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
/* Rewinding is only reliable on real hardware PCMs. */
2121 if (pa_alsa_pcm_is_hw(u
->pcm_handle
))
2122 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
2124 pa_log_info("Disabling rewind for device %s", u
->device_name
);
2125 pa_sink_set_max_rewind(u
->sink
, 0);
2128 if (u
->use_tsched
) {
/* Watermark was computed against the requested spec; rescale it to the
 * spec ALSA actually gave us. */
2129 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
2131 u
->watermark_inc_step
= pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC
, &u
->sink
->sample_spec
);
2132 u
->watermark_dec_step
= pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC
, &u
->sink
->sample_spec
);
2134 u
->watermark_inc_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
2135 u
->watermark_dec_threshold
= pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC
, &u
->sink
->sample_spec
);
2137 fix_min_sleep_wakeup(u
);
2138 fix_tsched_watermark(u
);
2140 pa_sink_set_latency_range(u
->sink
,
2142 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2144 pa_log_info("Time scheduling watermark is %0.2fms",
2145 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
/* Non-tsched: latency is fixed at the full hardware buffer. */
2147 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
2151 if (update_sw_params(u
) < 0)
2154 if (setup_mixer(u
, ignore_dB
) < 0)
2157 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
2159 if (!(u
->thread
= pa_thread_new("alsa-sink", thread_func
, u
))) {
2160 pa_log("Failed to create thread.");
2164 /* Get initial mixer settings */
2165 if (data
.volume_is_set
) {
2166 if (u
->sink
->set_volume
)
2167 u
->sink
->set_volume(u
->sink
);
2169 if (u
->sink
->get_volume
)
2170 u
->sink
->get_volume(u
->sink
);
2173 if (data
.muted_is_set
) {
2174 if (u
->sink
->set_mute
)
2175 u
->sink
->set_mute(u
->sink
);
2177 if (u
->sink
->get_mute
)
2178 u
->sink
->get_mute(u
->sink
);
2181 if ((data
.volume_is_set
|| data
.muted_is_set
) && u
->sink
->write_volume
)
2182 u
->sink
->write_volume(u
->sink
);
2184 pa_sink_put(u
->sink
);
2187 pa_alsa_profile_set_free(profile_set
);
/* Error path also releases the profile set (success path above too). */
2197 pa_alsa_profile_set_free(profile_set
);
/*
 * Tears down everything pa_alsa_sink_new() built, in reverse dependency
 * order: unlink the sink, stop the IO thread (SHUTDOWN message + join),
 * release thread mq / sink ref / pending memchunk, mixer-event plumbing,
 * rtpoll items, the PCM (drop + close), mixer paths and handle, the
 * smoother, and finally the owned strings.
 */
2202 static void userdata_free(struct userdata
*u
) {
2206 pa_sink_unlink(u
->sink
);
/* Synchronously stop the IO thread before freeing anything it uses. */
2209 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
2210 pa_thread_free(u
->thread
);
2213 pa_thread_mq_done(&u
->thread_mq
);
2216 pa_sink_unref(u
->sink
);
2218 if (u
->memchunk
.memblock
)
2219 pa_memblock_unref(u
->memchunk
.memblock
);
2222 pa_alsa_mixer_pdata_free(u
->mixer_pd
);
2224 if (u
->alsa_rtpoll_item
)
2225 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
2228 pa_rtpoll_free(u
->rtpoll
);
/* Drop pending samples, then close the PCM. */
2230 if (u
->pcm_handle
) {
2231 snd_pcm_drop(u
->pcm_handle
);
2232 snd_pcm_close(u
->pcm_handle
);
2236 pa_alsa_fdlist_free(u
->mixer_fdl
);
/* mixer_path is owned by the path set when one exists; free only one. */
2238 if (u
->mixer_path_set
)
2239 pa_alsa_path_set_free(u
->mixer_path_set
);
2240 else if (u
->mixer_path
)
2241 pa_alsa_path_free(u
->mixer_path
);
2243 if (u
->mixer_handle
)
2244 snd_mixer_close(u
->mixer_handle
);
2247 pa_smoother_free(u
->smoother
);
2252 pa_xfree(u
->device_name
);
2253 pa_xfree(u
->control_device
);
2257 void pa_alsa_sink_free(pa_sink
*s
) {
2260 pa_sink_assert_ref(s
);
2261 pa_assert_se(u
= s
->userdata
);