/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
55 #include "alsa-util.h"
56 #include "alsa-sink.h"
/* Fallback ALSA device string used when no "device"/"device_id" module
 * argument is given. */
#define DEFAULT_DEVICE "default"

/* Buffer metrics for timer-based scheduling (tsched). */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* 20ms */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)           /* 3ms */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)          /* 3ms */
70 pa_thread_mq thread_mq
;
73 snd_pcm_t
*pcm_handle
;
75 pa_alsa_fdlist
*mixer_fdl
;
76 snd_mixer_t
*mixer_handle
;
77 snd_mixer_elem_t
*mixer_elem
;
78 long hw_volume_max
, hw_volume_min
;
79 long hw_dB_max
, hw_dB_min
;
80 pa_bool_t hw_dB_supported
;
81 pa_bool_t mixer_seperate_channels
;
82 pa_cvolume hardware_volume
;
84 size_t frame_size
, fragment_size
, hwbuf_size
, tsched_watermark
;
90 pa_bool_t use_mmap
, use_tsched
;
92 pa_bool_t first
, after_rewind
;
94 pa_rtpoll_item
*alsa_rtpoll_item
;
96 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
98 pa_smoother
*smoother
;
100 uint64_t since_start
;
102 snd_pcm_sframes_t hwbuf_unused_frames
;
/* Forward declaration: tears down a userdata structure (definition
 * appears later in this file). */
static void userdata_free(struct userdata *u);
107 static void fix_tsched_watermark(struct userdata
*u
) {
109 size_t min_sleep
, min_wakeup
;
112 max_use
= u
->hwbuf_size
- (size_t) u
->hwbuf_unused_frames
* u
->frame_size
;
114 min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
115 min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
117 if (min_sleep
> max_use
/2)
118 min_sleep
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
119 if (min_sleep
< u
->frame_size
)
120 min_sleep
= u
->frame_size
;
122 if (min_wakeup
> max_use
/2)
123 min_wakeup
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
124 if (min_wakeup
< u
->frame_size
)
125 min_wakeup
= u
->frame_size
;
127 if (u
->tsched_watermark
> max_use
-min_sleep
)
128 u
->tsched_watermark
= max_use
-min_sleep
;
130 if (u
->tsched_watermark
< min_wakeup
)
131 u
->tsched_watermark
= min_wakeup
;
134 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
137 pa_assert(sleep_usec
);
138 pa_assert(process_usec
);
142 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
144 if (usec
== (pa_usec_t
) -1)
145 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
147 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
149 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
152 *sleep_usec
= usec
- wm
;
155 *process_usec
= *sleep_usec
= usec
/ 2;
157 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
160 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
165 pa_log_debug("%s: %s", call
, snd_strerror(err
));
167 pa_assert(err
!= -EAGAIN
);
170 pa_log_debug("%s: Buffer underrun!", call
);
172 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) == 0) {
178 pa_log("%s: %s", call
, snd_strerror(err
));
182 static size_t check_left_to_play(struct userdata
*u
, snd_pcm_sframes_t n
) {
185 if ((size_t) n
*u
->frame_size
< u
->hwbuf_size
)
186 left_to_play
= u
->hwbuf_size
- ((size_t) n
*u
->frame_size
);
190 if (left_to_play
> 0) {
191 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
192 } else if (!u
->first
&& !u
->after_rewind
) {
193 pa_log_info("Underrun!");
196 size_t old_watermark
= u
->tsched_watermark
;
198 u
->tsched_watermark
*= 2;
199 fix_tsched_watermark(u
);
201 if (old_watermark
!= u
->tsched_watermark
)
202 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
203 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
210 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
212 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
216 pa_sink_assert_ref(u
->sink
);
219 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
225 snd_pcm_hwsync(u
->pcm_handle
);
227 /* First we determine how many samples are missing to fill the
228 * buffer up to 100% */
230 if (PA_UNLIKELY((n
= pa_alsa_safe_avail_update(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
232 if ((r
= try_recover(u
, "snd_pcm_avail_update", (int) n
)) == 0)
238 left_to_play
= check_left_to_play(u
, n
);
242 /* We won't fill up the playback buffer before at least
243 * half the sleep time is over because otherwise we might
244 * ask for more data from the clients then they expect. We
245 * need to guarantee that clients only have to keep around
246 * a single hw buffer length. */
249 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
252 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
)) {
254 if (polled
&& pa_log_ratelimit())
255 pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
256 "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
257 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");
262 n
-= u
->hwbuf_unused_frames
;
266 /* pa_log_debug("Filling up"); */
272 const snd_pcm_channel_area_t
*areas
;
273 snd_pcm_uframes_t offset
, frames
= (snd_pcm_uframes_t
) n
;
274 snd_pcm_sframes_t sframes
;
276 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
278 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
280 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
286 /* Make sure that if these memblocks need to be copied they will fit into one slot */
287 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
288 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
290 /* Check these are multiples of 8 bit */
291 pa_assert((areas
[0].first
& 7) == 0);
292 pa_assert((areas
[0].step
& 7)== 0);
294 /* We assume a single interleaved memory buffer */
295 pa_assert((areas
[0].first
>> 3) == 0);
296 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
298 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
300 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
301 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
304 pa_sink_render_into_full(u
->sink
, &chunk
);
306 /* FIXME: Maybe we can do something to keep this memory block
307 * a little bit longer around? */
308 pa_memblock_unref_fixed(chunk
.memblock
);
310 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
312 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
320 u
->frame_index
+= (int64_t) frames
;
321 u
->since_start
+= frames
* u
->frame_size
;
323 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
325 if (frames
>= (snd_pcm_uframes_t
) n
)
328 n
-= (snd_pcm_sframes_t
) frames
;
332 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
336 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
338 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
342 pa_sink_assert_ref(u
->sink
);
345 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
351 snd_pcm_hwsync(u
->pcm_handle
);
353 if (PA_UNLIKELY((n
= pa_alsa_safe_avail_update(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
355 if ((r
= try_recover(u
, "snd_pcm_avail_update", (int) n
)) == 0)
361 left_to_play
= check_left_to_play(u
, n
);
365 /* We won't fill up the playback buffer before at least
366 * half the sleep time is over because otherwise we might
367 * ask for more data from the clients then they expect. We
368 * need to guarantee that clients only have to keep around
369 * a single hw buffer length. */
372 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
375 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
)) {
377 if (polled
&& pa_log_ratelimit())
378 pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
379 "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
380 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");
385 n
-= u
->hwbuf_unused_frames
;
390 snd_pcm_sframes_t frames
;
393 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
395 if (u
->memchunk
.length
<= 0)
396 pa_sink_render(u
->sink
, (size_t) n
* u
->frame_size
, &u
->memchunk
);
398 pa_assert(u
->memchunk
.length
> 0);
400 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
405 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
406 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
407 pa_memblock_release(u
->memchunk
.memblock
);
409 pa_assert(frames
!= 0);
411 if (PA_UNLIKELY(frames
< 0)) {
413 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
419 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
420 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
422 if (u
->memchunk
.length
<= 0) {
423 pa_memblock_unref(u
->memchunk
.memblock
);
424 pa_memchunk_reset(&u
->memchunk
);
429 u
->frame_index
+= frames
;
430 u
->since_start
+= (size_t) frames
* u
->frame_size
;
432 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
441 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
445 static void update_smoother(struct userdata
*u
) {
446 snd_pcm_sframes_t delay
= 0;
449 pa_usec_t now1
, now2
;
450 /* struct timeval timestamp; */
451 snd_pcm_status_t
*status
;
453 snd_pcm_status_alloca(&status
);
456 pa_assert(u
->pcm_handle
);
458 /* Let's update the time smoother */
460 snd_pcm_hwsync(u
->pcm_handle
);
461 snd_pcm_avail_update(u
->pcm_handle
);
463 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
464 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
468 /* delay = snd_pcm_status_get_delay(status); */
470 if (PA_UNLIKELY((err
= snd_pcm_delay(u
->pcm_handle
, &delay
)) < 0)) {
471 pa_log("Failed to query DSP status data: %s", snd_strerror(err
));
475 frames
= u
->frame_index
- delay
;
477 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
479 /* snd_pcm_status_get_tstamp(status, ×tamp); */
480 /* pa_rtclock_from_wallclock(×tamp); */
481 /* now1 = pa_timeval_load(×tamp); */
483 now1
= pa_rtclock_usec();
484 now2
= pa_bytes_to_usec((uint64_t) frames
* u
->frame_size
, &u
->sink
->sample_spec
);
485 pa_smoother_put(u
->smoother
, now1
, now2
);
488 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
491 pa_usec_t now1
, now2
;
495 now1
= pa_rtclock_usec();
496 now2
= pa_smoother_get(u
->smoother
, now1
);
498 delay
= (int64_t) pa_bytes_to_usec((uint64_t) u
->frame_index
* u
->frame_size
, &u
->sink
->sample_spec
) - (int64_t) now2
;
501 r
= (pa_usec_t
) delay
;
503 if (u
->memchunk
.memblock
)
504 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
509 static int build_pollfd(struct userdata
*u
) {
511 pa_assert(u
->pcm_handle
);
513 if (u
->alsa_rtpoll_item
)
514 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
516 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
522 static int suspend(struct userdata
*u
) {
524 pa_assert(u
->pcm_handle
);
526 pa_smoother_pause(u
->smoother
, pa_rtclock_usec());
528 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
529 * take awfully long with our long buffer sizes today. */
530 snd_pcm_close(u
->pcm_handle
);
531 u
->pcm_handle
= NULL
;
533 if (u
->alsa_rtpoll_item
) {
534 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
535 u
->alsa_rtpoll_item
= NULL
;
538 pa_log_info("Device suspended...");
543 static int update_sw_params(struct userdata
*u
) {
544 snd_pcm_uframes_t avail_min
;
549 /* Use the full buffer if noone asked us for anything specific */
550 u
->hwbuf_unused_frames
= 0;
555 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
558 pa_log_debug("latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
560 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
562 /* We need at least one sample in our buffer */
564 if (PA_UNLIKELY(b
< u
->frame_size
))
567 u
->hwbuf_unused_frames
= (snd_pcm_sframes_t
)
568 (PA_LIKELY(b
< u
->hwbuf_size
) ?
569 ((u
->hwbuf_size
- b
) / u
->frame_size
) : 0);
572 fix_tsched_watermark(u
);
575 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u
->hwbuf_unused_frames
);
577 /* We need at last one frame in the used part of the buffer */
578 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused_frames
+ 1;
581 pa_usec_t sleep_usec
, process_usec
;
583 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
584 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
587 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
589 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
590 pa_log("Failed to set software parameters: %s", snd_strerror(err
));
594 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
- (size_t) u
->hwbuf_unused_frames
* u
->frame_size
);
599 static int unsuspend(struct userdata
*u
) {
604 snd_pcm_uframes_t period_size
;
607 pa_assert(!u
->pcm_handle
);
609 pa_log_info("Trying resume...");
611 snd_config_update_free_global();
612 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
613 /*SND_PCM_NONBLOCK|*/
614 SND_PCM_NO_AUTO_RESAMPLE
|
615 SND_PCM_NO_AUTO_CHANNELS
|
616 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
617 pa_log("Error opening PCM device %s: %s", u
->device_name
, snd_strerror(err
));
621 ss
= u
->sink
->sample_spec
;
622 nfrags
= u
->nfragments
;
623 period_size
= u
->fragment_size
/ u
->frame_size
;
627 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
628 pa_log("Failed to set hardware parameters: %s", snd_strerror(err
));
632 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
633 pa_log_warn("Resume failed, couldn't get original access mode.");
637 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
638 pa_log_warn("Resume failed, couldn't restore original sample settings.");
642 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
643 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
644 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
645 (unsigned long) nfrags
, period_size
* u
->frame_size
);
649 if (update_sw_params(u
) < 0)
652 if (build_pollfd(u
) < 0)
655 /* FIXME: We need to reload the volume somehow */
660 pa_log_info("Resumed successfully...");
666 snd_pcm_close(u
->pcm_handle
);
667 u
->pcm_handle
= NULL
;
673 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
674 struct userdata
*u
= PA_SINK(o
)->userdata
;
678 case PA_SINK_MESSAGE_GET_LATENCY
: {
682 r
= sink_get_latency(u
);
684 *((pa_usec_t
*) data
) = r
;
689 case PA_SINK_MESSAGE_SET_STATE
:
691 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
693 case PA_SINK_SUSPENDED
:
694 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
702 case PA_SINK_RUNNING
:
704 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
705 if (build_pollfd(u
) < 0)
709 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
710 if (unsuspend(u
) < 0)
716 case PA_SINK_UNLINKED
:
718 case PA_SINK_INVALID_STATE
:
725 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
728 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
729 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
732 pa_assert(u
->mixer_handle
);
734 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
737 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
738 pa_sink_get_volume(u
->sink
, TRUE
);
739 pa_sink_get_mute(u
->sink
, TRUE
);
745 static pa_volume_t
from_alsa_volume(struct userdata
*u
, long alsa_vol
) {
747 return (pa_volume_t
) round(((double) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) /
748 (double) (u
->hw_volume_max
- u
->hw_volume_min
));
751 static long to_alsa_volume(struct userdata
*u
, pa_volume_t vol
) {
754 alsa_vol
= (long) round(((double) vol
* (double) (u
->hw_volume_max
- u
->hw_volume_min
))
755 / PA_VOLUME_NORM
) + u
->hw_volume_min
;
757 return PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_volume_min
, u
->hw_volume_max
);
760 static void sink_get_volume_cb(pa_sink
*s
) {
761 struct userdata
*u
= s
->userdata
;
765 char t
[PA_CVOLUME_SNPRINT_MAX
];
768 pa_assert(u
->mixer_elem
);
770 if (u
->mixer_seperate_channels
) {
772 r
.channels
= s
->sample_spec
.channels
;
774 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
777 if (u
->hw_dB_supported
) {
779 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
782 #ifdef HAVE_VALGRIND_MEMCHECK_H
783 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
786 r
.values
[i
] = pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0);
789 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
792 r
.values
[i
] = from_alsa_volume(u
, alsa_vol
);
799 if (u
->hw_dB_supported
) {
801 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
804 #ifdef HAVE_VALGRIND_MEMCHECK_H
805 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
808 pa_cvolume_set(&r
, s
->sample_spec
.channels
, pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0));
812 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
815 pa_cvolume_set(&r
, s
->sample_spec
.channels
, from_alsa_volume(u
, alsa_vol
));
819 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
821 if (!pa_cvolume_equal(&u
->hardware_volume
, &r
)) {
823 s
->virtual_volume
= u
->hardware_volume
= r
;
825 if (u
->hw_dB_supported
) {
828 /* Hmm, so the hardware volume changed, let's reset our software volume */
829 pa_cvolume_reset(&reset
, s
->sample_spec
.channels
);
830 pa_sink_set_soft_volume(s
, &reset
);
837 pa_log_error("Unable to read volume: %s", snd_strerror(err
));
840 static void sink_set_volume_cb(pa_sink
*s
) {
841 struct userdata
*u
= s
->userdata
;
847 pa_assert(u
->mixer_elem
);
849 if (u
->mixer_seperate_channels
) {
851 r
.channels
= s
->sample_spec
.channels
;
853 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
857 vol
= s
->virtual_volume
.values
[i
];
859 if (u
->hw_dB_supported
) {
861 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
862 alsa_vol
+= u
->hw_dB_max
;
863 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
865 if ((err
= snd_mixer_selem_set_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
, 1)) < 0)
868 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
871 #ifdef HAVE_VALGRIND_MEMCHECK_H
872 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
875 r
.values
[i
] = pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0);
878 alsa_vol
= to_alsa_volume(u
, vol
);
880 if ((err
= snd_mixer_selem_set_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
)) < 0)
883 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
886 r
.values
[i
] = from_alsa_volume(u
, alsa_vol
);
894 vol
= pa_cvolume_max(&s
->virtual_volume
);
896 if (u
->hw_dB_supported
) {
897 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
898 alsa_vol
+= u
->hw_dB_max
;
899 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
901 if ((err
= snd_mixer_selem_set_playback_dB_all(u
->mixer_elem
, alsa_vol
, 1)) < 0)
904 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
907 #ifdef HAVE_VALGRIND_MEMCHECK_H
908 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
911 pa_cvolume_set(&r
, s
->sample_spec
.channels
, pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0));
914 alsa_vol
= to_alsa_volume(u
, vol
);
916 if ((err
= snd_mixer_selem_set_playback_volume_all(u
->mixer_elem
, alsa_vol
)) < 0)
919 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
922 pa_cvolume_set(&r
, s
->sample_spec
.channels
, from_alsa_volume(u
, alsa_vol
));
926 u
->hardware_volume
= r
;
928 if (u
->hw_dB_supported
) {
929 char t
[PA_CVOLUME_SNPRINT_MAX
];
931 /* Match exactly what the user requested by software */
932 pa_sw_cvolume_divide(&s
->soft_volume
, &s
->virtual_volume
, &u
->hardware_volume
);
934 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->virtual_volume
));
935 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
936 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->soft_volume
));
940 /* We can't match exactly what the user requested, hence let's
941 * at least tell the user about it */
943 s
->virtual_volume
= r
;
948 pa_log_error("Unable to set volume: %s", snd_strerror(err
));
951 static void sink_get_mute_cb(pa_sink
*s
) {
952 struct userdata
*u
= s
->userdata
;
956 pa_assert(u
->mixer_elem
);
958 if ((err
= snd_mixer_selem_get_playback_switch(u
->mixer_elem
, 0, &sw
)) < 0) {
959 pa_log_error("Unable to get switch: %s", snd_strerror(err
));
966 static void sink_set_mute_cb(pa_sink
*s
) {
967 struct userdata
*u
= s
->userdata
;
971 pa_assert(u
->mixer_elem
);
973 if ((err
= snd_mixer_selem_set_playback_switch_all(u
->mixer_elem
, !s
->muted
)) < 0) {
974 pa_log_error("Unable to set switch: %s", snd_strerror(err
));
979 static void sink_update_requested_latency_cb(pa_sink
*s
) {
980 struct userdata
*u
= s
->userdata
;
981 snd_pcm_sframes_t before
;
987 before
= u
->hwbuf_unused_frames
;
990 /* Let's check whether we now use only a smaller part of the
991 buffer then before. If so, we need to make sure that subsequent
992 rewinds are relative to the new maxium fill level and not to the
993 current fill level. Thus, let's do a full rewind once, to clear
996 if (u
->hwbuf_unused_frames
> before
) {
997 pa_log_debug("Requesting rewind due to latency change.");
998 pa_sink_request_rewind(s
, (size_t) -1);
1002 static int process_rewind(struct userdata
*u
) {
1003 snd_pcm_sframes_t unused
;
1004 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1007 /* Figure out how much we shall rewind and reset the counter */
1008 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1009 u
->sink
->thread_info
.rewind_nbytes
= 0;
1011 if (rewind_nbytes
<= 0)
1014 pa_assert(rewind_nbytes
> 0);
1015 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1017 snd_pcm_hwsync(u
->pcm_handle
);
1018 if ((unused
= snd_pcm_avail_update(u
->pcm_handle
)) < 0) {
1019 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused
));
1023 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1025 if (u
->hwbuf_size
> unused_nbytes
)
1026 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1030 if (rewind_nbytes
> limit_nbytes
)
1031 rewind_nbytes
= limit_nbytes
;
1033 if (rewind_nbytes
> 0) {
1034 snd_pcm_sframes_t in_frames
, out_frames
;
1036 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1038 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1039 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1040 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1041 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames
));
1044 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1046 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1048 if (rewind_nbytes
<= 0)
1049 pa_log_info("Tried rewind, but was apparently not possible.");
1051 u
->frame_index
-= out_frames
;
1052 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1053 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1055 u
->after_rewind
= TRUE
;
1059 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1063 pa_sink_process_rewind(u
->sink
, 0);
1069 static void thread_func(void *userdata
) {
1070 struct userdata
*u
= userdata
;
1071 unsigned short revents
= 0;
1075 pa_log_debug("Thread starting up");
1077 if (u
->core
->realtime_scheduling
)
1078 pa_make_realtime(u
->core
->realtime_priority
);
1080 pa_thread_mq_install(&u
->thread_mq
);
1081 pa_rtpoll_install(u
->rtpoll
);
1086 /* pa_log_debug("loop"); */
1088 /* Render some data and write it to the dsp */
1089 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1091 pa_usec_t sleep_usec
= 0;
1093 if (u
->sink
->thread_info
.rewind_requested
)
1094 if (process_rewind(u
) < 0)
1098 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
);
1100 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
);
1105 /* pa_log_debug("work_done = %i", work_done); */
1110 pa_log_info("Starting playback.");
1111 snd_pcm_start(u
->pcm_handle
);
1113 pa_smoother_resume(u
->smoother
, pa_rtclock_usec());
1119 if (u
->use_tsched
) {
1122 if (u
->since_start
<= u
->hwbuf_size
) {
1124 /* USB devices on ALSA seem to hit a buffer
1125 * underrun during the first iterations much
1126 * quicker then we calculate here, probably due to
1127 * the transport latency. To accomodate for that
1128 * we artificially decrease the sleep time until
1129 * we have filled the buffer at least once
1132 /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
1136 /* OK, the playback buffer is now full, let's
1137 * calculate when to wake up next */
1138 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1140 /* Convert from the sound card time domain to the
1141 * system time domain */
1142 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_usec(), sleep_usec
);
1144 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1146 /* We don't trust the conversion, so we wake up whatever comes first */
1147 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1151 u
->after_rewind
= FALSE
;
1153 } else if (u
->use_tsched
)
1155 /* OK, we're in an invalid state, let's disable our timers */
1156 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1158 /* Hmm, nothing to do. Let's sleep */
1159 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1165 /* Tell ALSA about this and process its response */
1166 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1167 struct pollfd
*pollfd
;
1171 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1173 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1174 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err
));
1178 if (revents
& (POLLIN
|POLLERR
|POLLNVAL
|POLLHUP
|POLLPRI
)) {
1179 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1186 if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1187 pa_log_debug("Wakeup from ALSA!%s%s", (revents
& POLLIN
) ? " INPUT" : "", (revents
& POLLOUT
) ? " OUTPUT" : "");
1193 /* If this was no regular exit from the loop we have to continue
1194 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1195 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1196 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1199 pa_log_debug("Thread shutting down");
1202 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
) {
1208 pa_assert(device_name
);
1210 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1211 pa_sink_new_data_set_name(data
, n
);
1212 data
->namereg_fail
= TRUE
;
1216 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1217 data
->namereg_fail
= TRUE
;
1219 n
= device_id
? device_id
: device_name
;
1220 data
->namereg_fail
= FALSE
;
1223 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1224 pa_sink_new_data_set_name(data
, t
);
1228 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, const pa_alsa_profile_info
*profile
) {
1230 struct userdata
*u
= NULL
;
1231 const char *dev_id
= NULL
;
1234 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1235 snd_pcm_uframes_t period_frames
, tsched_frames
;
1237 snd_pcm_info_t
*pcm_info
= NULL
;
1239 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1241 pa_sink_new_data data
;
1243 snd_pcm_info_alloca(&pcm_info
);
1248 ss
= m
->core
->default_sample_spec
;
1249 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1250 pa_log("Failed to parse sample specification and channel map");
1254 frame_size
= pa_frame_size(&ss
);
1256 nfrags
= m
->core
->default_n_fragments
;
1257 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1259 frag_size
= (uint32_t) frame_size
;
1260 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1261 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1263 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1264 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1265 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1266 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1267 pa_log("Failed to parse buffer metrics");
1271 hwbuf_size
= frag_size
* nfrags
;
1272 period_frames
= frag_size
/frame_size
;
1273 tsched_frames
= tsched_size
/frame_size
;
1275 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1276 pa_log("Failed to parse mmap argument.");
1280 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1281 pa_log("Failed to parse tsched argument.");
1285 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1286 pa_log("Failed to parse ignore_dB argument.");
1290 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1291 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1295 u
= pa_xnew0(struct userdata
, 1);
1298 u
->use_mmap
= use_mmap
;
1299 u
->use_tsched
= use_tsched
;
1302 u
->after_rewind
= FALSE
;
1303 u
->rtpoll
= pa_rtpoll_new();
1304 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1305 u
->alsa_rtpoll_item
= NULL
;
1307 u
->smoother
= pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC
*2, DEFAULT_TSCHED_BUFFER_USEC
*2, TRUE
, 5);
1308 usec
= pa_rtclock_usec();
1309 pa_smoother_set_time_offset(u
->smoother
, usec
);
1310 pa_smoother_pause(u
->smoother
, usec
);
1317 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1318 pa_log("device_id= not set");
1322 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_profile(
1326 SND_PCM_STREAM_PLAYBACK
,
1327 &nfrags
, &period_frames
, tsched_frames
,
1332 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1334 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1338 SND_PCM_STREAM_PLAYBACK
,
1339 &nfrags
, &period_frames
, tsched_frames
,
1346 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1347 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1350 SND_PCM_STREAM_PLAYBACK
,
1351 &nfrags
, &period_frames
, tsched_frames
,
1357 pa_assert(u
->device_name
);
1358 pa_log_info("Successfully opened device %s.", u
->device_name
);
1361 pa_log_info("Selected configuration '%s' (%s).", profile
->description
, profile
->name
);
1363 if (use_mmap
&& !b
) {
1364 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1365 u
->use_mmap
= use_mmap
= FALSE
;
1368 if (use_tsched
&& (!b
|| !d
)) {
1369 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1370 u
->use_tsched
= use_tsched
= FALSE
;
1374 pa_log_info("Successfully enabled mmap() mode.");
1377 pa_log_info("Successfully enabled timer-based scheduling mode.");
1379 if ((err
= snd_pcm_info(u
->pcm_handle
, pcm_info
)) < 0) {
1380 pa_log("Error fetching PCM info: %s", snd_strerror(err
));
1384 /* ALSA might tweak the sample spec, so recalculate the frame size */
1385 frame_size
= pa_frame_size(&ss
);
1387 if ((err
= snd_mixer_open(&u
->mixer_handle
, 0)) < 0)
1388 pa_log_warn("Error opening mixer: %s", snd_strerror(err
));
1390 pa_bool_t found
= FALSE
;
1392 if (pa_alsa_prepare_mixer(u
->mixer_handle
, u
->device_name
) >= 0)
1395 snd_pcm_info_t
*info
;
1397 snd_pcm_info_alloca(&info
);
1399 if (snd_pcm_info(u
->pcm_handle
, info
) >= 0) {
1403 if ((card_idx
= snd_pcm_info_get_card(info
)) >= 0) {
1405 md
= pa_sprintf_malloc("hw:%i", card_idx
);
1407 if (strcmp(u
->device_name
, md
))
1408 if (pa_alsa_prepare_mixer(u
->mixer_handle
, md
) >= 0)
1416 if (!(u
->mixer_elem
= pa_alsa_find_elem(u
->mixer_handle
, "Master", "PCM", TRUE
)))
1420 snd_mixer_close(u
->mixer_handle
);
1421 u
->mixer_handle
= NULL
;
1425 pa_sink_new_data_init(&data
);
1426 data
.driver
= driver
;
1429 set_sink_name(&data
, ma
, dev_id
, u
->device_name
);
1430 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1431 pa_sink_new_data_set_channel_map(&data
, &map
);
1433 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, pcm_info
);
1434 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1435 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1436 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1437 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1440 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, profile
->name
);
1441 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, profile
->description
);
1444 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
);
1445 pa_sink_new_data_done(&data
);
1448 pa_log("Failed to create sink object");
1452 u
->sink
->parent
.process_msg
= sink_process_msg
;
1453 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1454 u
->sink
->userdata
= u
;
1456 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1457 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1459 u
->frame_size
= frame_size
;
1460 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1461 u
->nfragments
= nfrags
;
1462 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1463 u
->hwbuf_unused_frames
= 0;
1464 u
->tsched_watermark
= tsched_watermark
;
1466 u
->hw_dB_supported
= FALSE
;
1467 u
->hw_dB_min
= u
->hw_dB_max
= 0;
1468 u
->hw_volume_min
= u
->hw_volume_max
= 0;
1469 u
->mixer_seperate_channels
= FALSE
;
1470 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1473 fix_tsched_watermark(u
);
1475 u
->sink
->thread_info
.max_rewind
= use_tsched
? u
->hwbuf_size
: 0;
1476 u
->sink
->thread_info
.max_request
= u
->hwbuf_size
;
1478 pa_sink_set_latency_range(u
->sink
,
1479 !use_tsched
? pa_bytes_to_usec(u
->hwbuf_size
, &ss
) : (pa_usec_t
) -1,
1480 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1482 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1483 nfrags
, (long unsigned) u
->fragment_size
,
1484 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1487 pa_log_info("Time scheduling watermark is %0.2fms",
1488 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1490 if (update_sw_params(u
) < 0)
1493 pa_memchunk_reset(&u
->memchunk
);
1495 if (u
->mixer_handle
) {
1496 pa_assert(u
->mixer_elem
);
1498 if (snd_mixer_selem_has_playback_volume(u
->mixer_elem
)) {
1499 pa_bool_t suitable
= FALSE
;
1501 if (snd_mixer_selem_get_playback_volume_range(u
->mixer_elem
, &u
->hw_volume_min
, &u
->hw_volume_max
) < 0)
1502 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1503 else if (u
->hw_volume_min
>= u
->hw_volume_max
)
1504 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u
->hw_volume_min
, u
->hw_volume_max
);
1506 pa_log_info("Volume ranges from %li to %li.", u
->hw_volume_min
, u
->hw_volume_max
);
1511 if (ignore_dB
|| snd_mixer_selem_get_playback_dB_range(u
->mixer_elem
, &u
->hw_dB_min
, &u
->hw_dB_max
) < 0)
1512 pa_log_info("Mixer doesn't support dB information or data is ignored.");
1514 #ifdef HAVE_VALGRIND_MEMCHECK_H
1515 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_min
, sizeof(u
->hw_dB_min
));
1516 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_max
, sizeof(u
->hw_dB_max
));
1519 if (u
->hw_dB_min
>= u
->hw_dB_max
)
1520 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u
->hw_dB_min
/100.0, (double) u
->hw_dB_max
/100.0);
1522 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u
->hw_dB_min
/100.0, (double) u
->hw_dB_max
/100.0);
1523 u
->hw_dB_supported
= TRUE
;
1525 if (u
->hw_dB_max
> 0) {
1526 u
->sink
->base_volume
= pa_sw_volume_from_dB(- (double) u
->hw_dB_max
/100.0);
1527 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1529 pa_log_info("No particular base volume set, fixing to 0 dB");
1533 if (!u
->hw_dB_supported
&&
1534 u
->hw_volume_max
- u
->hw_volume_min
< 3) {
1536 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1542 u
->mixer_seperate_channels
= pa_alsa_calc_mixer_map(u
->mixer_elem
, &map
, u
->mixer_map
, TRUE
) >= 0;
1544 u
->sink
->get_volume
= sink_get_volume_cb
;
1545 u
->sink
->set_volume
= sink_set_volume_cb
;
1546 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->hw_dB_supported
? PA_SINK_DECIBEL_VOLUME
: 0);
1547 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->hw_dB_supported
? "supported" : "not supported");
1549 if (!u
->hw_dB_supported
)
1550 u
->sink
->n_volume_steps
= u
->hw_volume_max
- u
->hw_volume_min
+ 1;
1552 pa_log_info("Using software volume control.");
1555 if (snd_mixer_selem_has_playback_switch(u
->mixer_elem
)) {
1556 u
->sink
->get_mute
= sink_get_mute_cb
;
1557 u
->sink
->set_mute
= sink_set_mute_cb
;
1558 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1560 pa_log_info("Using software mute control.");
1562 u
->mixer_fdl
= pa_alsa_fdlist_new();
1564 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, m
->core
->mainloop
) < 0) {
1565 pa_log("Failed to initialize file descriptor monitoring");
1569 snd_mixer_elem_set_callback(u
->mixer_elem
, mixer_callback
);
1570 snd_mixer_elem_set_callback_private(u
->mixer_elem
, u
);
1572 u
->mixer_fdl
= NULL
;
1574 pa_alsa_dump(u
->pcm_handle
);
1576 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1577 pa_log("Failed to create thread.");
1581 /* Get initial mixer settings */
1582 if (data
.volume_is_set
) {
1583 if (u
->sink
->set_volume
)
1584 u
->sink
->set_volume(u
->sink
);
1586 if (u
->sink
->get_volume
)
1587 u
->sink
->get_volume(u
->sink
);
1590 if (data
.muted_is_set
) {
1591 if (u
->sink
->set_mute
)
1592 u
->sink
->set_mute(u
->sink
);
1594 if (u
->sink
->get_mute
)
1595 u
->sink
->get_mute(u
->sink
);
1598 pa_sink_put(u
->sink
);
/* Tear down the per-sink userdata built up by pa_alsa_sink_new(),
 * releasing resources in roughly the reverse order of construction:
 * sink, IO thread, thread message queue, memchunk, rtpoll, mixer,
 * PCM handle, smoother, strings.
 *
 * NOTE(review): this excerpt is truncated — the complete function
 * interleaves NULL guards (e.g. around the sink/thread teardown)
 * and ends with pa_xfree(u); verify against the full file. */
1609 static void userdata_free(struct userdata
*u
) {
/* Detach the sink from the core first so no new audio is routed
 * to it while we shut the IO thread down. */
1613 pa_sink_unlink(u
->sink
);
/* Ask the IO thread to exit via its message queue, then reap it. */
1616 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1617 pa_thread_free(u
->thread
);
1620 pa_thread_mq_done(&u
->thread_mq
);
/* Drop our reference to the sink object itself. */
1623 pa_sink_unref(u
->sink
);
/* Release any partially rendered memchunk still held. */
1625 if (u
->memchunk
.memblock
)
1626 pa_memblock_unref(u
->memchunk
.memblock
);
/* Free the ALSA poll item before the rtpoll that contained it. */
1628 if (u
->alsa_rtpoll_item
)
1629 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1632 pa_rtpoll_free(u
->rtpoll
);
/* Mixer event fd watcher, then the mixer handle it was watching. */
1635 pa_alsa_fdlist_free(u
->mixer_fdl
);
1637 if (u
->mixer_handle
)
1638 snd_mixer_close(u
->mixer_handle
);
/* Stop playback immediately (drop, not drain) and close the
 * ALSA device. */
1640 if (u
->pcm_handle
) {
1641 snd_pcm_drop(u
->pcm_handle
);
1642 snd_pcm_close(u
->pcm_handle
);
/* Timing smoother created in pa_alsa_sink_new(). */
1646 pa_smoother_free(u
->smoother
);
/* Device name string (allocated by the pa_alsa_open_* helpers). */
1648 pa_xfree(u
->device_name
);
1652 void pa_alsa_sink_free(pa_sink
*s
) {
1655 pa_sink_assert_ref(s
);
1656 pa_assert_se(u
= s
->userdata
);