2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
55 #include "alsa-util.h"
56 #include "module-alsa-sink-symdef.h"
58 PA_MODULE_AUTHOR("Lennart Poettering");
59 PA_MODULE_DESCRIPTION("ALSA Sink");
60 PA_MODULE_VERSION(PACKAGE_VERSION
);
61 PA_MODULE_LOAD_ONCE(FALSE
);
63 "sink_name=<name for the sink> "
64 "device=<ALSA device> "
65 "device_id=<ALSA card index> "
66 "format=<sample format> "
68 "channels=<number of channels> "
69 "channel_map=<channel map> "
70 "fragments=<number of fragments> "
71 "fragment_size=<fragment size> "
72 "mmap=<enable memory mapping?> "
73 "tsched=<enable system timer based scheduling mode?> "
74 "tsched_buffer_size=<buffer size when using timer based scheduling> "
75 "tsched_buffer_watermark=<lower fill watermark>");
77 static const char* const valid_modargs
[] = {
90 "tsched_buffer_watermark",
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
106 pa_thread_mq thread_mq
;
109 snd_pcm_t
*pcm_handle
;
111 pa_alsa_fdlist
*mixer_fdl
;
112 snd_mixer_t
*mixer_handle
;
113 snd_mixer_elem_t
*mixer_elem
;
114 long hw_volume_max
, hw_volume_min
;
115 long hw_dB_max
, hw_dB_min
;
116 pa_bool_t hw_dB_supported
;
117 pa_bool_t mixer_seperate_channels
;
118 pa_cvolume hardware_volume
;
120 size_t frame_size
, fragment_size
, hwbuf_size
, tsched_watermark
;
122 pa_memchunk memchunk
;
126 pa_bool_t use_mmap
, use_tsched
;
128 pa_bool_t first
, after_rewind
;
130 pa_rtpoll_item
*alsa_rtpoll_item
;
132 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
134 pa_smoother
*smoother
;
136 uint64_t since_start
;
138 snd_pcm_sframes_t hwbuf_unused_frames
;
141 static void fix_tsched_watermark(struct userdata
*u
) {
143 size_t min_sleep
, min_wakeup
;
146 max_use
= u
->hwbuf_size
- (size_t) u
->hwbuf_unused_frames
* u
->frame_size
;
148 min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
149 min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
151 if (min_sleep
> max_use
/2)
152 min_sleep
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
153 if (min_sleep
< u
->frame_size
)
154 min_sleep
= u
->frame_size
;
156 if (min_wakeup
> max_use
/2)
157 min_wakeup
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
158 if (min_wakeup
< u
->frame_size
)
159 min_wakeup
= u
->frame_size
;
161 if (u
->tsched_watermark
> max_use
-min_sleep
)
162 u
->tsched_watermark
= max_use
-min_sleep
;
164 if (u
->tsched_watermark
< min_wakeup
)
165 u
->tsched_watermark
= min_wakeup
;
168 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
171 pa_assert(sleep_usec
);
172 pa_assert(process_usec
);
176 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
178 if (usec
== (pa_usec_t
) -1)
179 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
181 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
183 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
186 *sleep_usec
= usec
- wm
;
189 *process_usec
= *sleep_usec
= usec
/ 2;
191 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
194 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
199 pa_log_debug("%s: %s", call
, snd_strerror(err
));
201 pa_assert(err
!= -EAGAIN
);
204 pa_log_debug("%s: Buffer underrun!", call
);
206 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) == 0) {
212 pa_log("%s: %s", call
, snd_strerror(err
));
216 static size_t check_left_to_play(struct userdata
*u
, snd_pcm_sframes_t n
) {
219 if ((size_t) n
*u
->frame_size
< u
->hwbuf_size
)
220 left_to_play
= u
->hwbuf_size
- ((size_t) n
*u
->frame_size
);
224 if (left_to_play
> 0) {
225 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
226 } else if (!u
->first
&& !u
->after_rewind
) {
227 pa_log_info("Underrun!");
230 size_t old_watermark
= u
->tsched_watermark
;
232 u
->tsched_watermark
*= 2;
233 fix_tsched_watermark(u
);
235 if (old_watermark
!= u
->tsched_watermark
)
236 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
237 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
244 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
) {
246 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
250 pa_sink_assert_ref(u
->sink
);
253 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
259 snd_pcm_hwsync(u
->pcm_handle
);
261 /* First we determine how many samples are missing to fill the
262 * buffer up to 100% */
264 if (PA_UNLIKELY((n
= pa_alsa_safe_avail_update(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
266 if ((r
= try_recover(u
, "snd_pcm_avail_update", (int) n
)) == 0)
272 left_to_play
= check_left_to_play(u
, n
);
276 /* We won't fill up the playback buffer before at least
277 * half the sleep time is over because otherwise we might
278 * ask for more data from the clients then they expect. We
279 * need to guarantee that clients only have to keep around
280 * a single hw buffer length. */
282 if (pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
285 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
))
288 n
-= u
->hwbuf_unused_frames
;
290 /* pa_log_debug("Filling up"); */
296 const snd_pcm_channel_area_t
*areas
;
297 snd_pcm_uframes_t offset
, frames
= (snd_pcm_uframes_t
) n
;
298 snd_pcm_sframes_t sframes
;
300 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
302 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
304 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
310 /* Make sure that if these memblocks need to be copied they will fit into one slot */
311 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
312 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
314 /* Check these are multiples of 8 bit */
315 pa_assert((areas
[0].first
& 7) == 0);
316 pa_assert((areas
[0].step
& 7)== 0);
318 /* We assume a single interleaved memory buffer */
319 pa_assert((areas
[0].first
>> 3) == 0);
320 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
322 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
324 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
325 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
328 pa_sink_render_into_full(u
->sink
, &chunk
);
330 /* FIXME: Maybe we can do something to keep this memory block
331 * a little bit longer around? */
332 pa_memblock_unref_fixed(chunk
.memblock
);
334 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
336 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
344 u
->frame_index
+= (int64_t) frames
;
345 u
->since_start
+= frames
* u
->frame_size
;
347 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
349 if (frames
>= (snd_pcm_uframes_t
) n
)
352 n
-= (snd_pcm_sframes_t
) frames
;
356 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
360 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
) {
362 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
366 pa_sink_assert_ref(u
->sink
);
369 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
375 snd_pcm_hwsync(u
->pcm_handle
);
377 if (PA_UNLIKELY((n
= pa_alsa_safe_avail_update(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
379 if ((r
= try_recover(u
, "snd_pcm_avail_update", (int) n
)) == 0)
385 left_to_play
= check_left_to_play(u
, n
);
389 /* We won't fill up the playback buffer before at least
390 * half the sleep time is over because otherwise we might
391 * ask for more data from the clients then they expect. We
392 * need to guarantee that clients only have to keep around
393 * a single hw buffer length. */
395 if (pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
398 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
))
401 n
-= u
->hwbuf_unused_frames
;
404 snd_pcm_sframes_t frames
;
407 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
409 if (u
->memchunk
.length
<= 0)
410 pa_sink_render(u
->sink
, (size_t) n
* u
->frame_size
, &u
->memchunk
);
412 pa_assert(u
->memchunk
.length
> 0);
414 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
419 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
420 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
421 pa_memblock_release(u
->memchunk
.memblock
);
423 pa_assert(frames
!= 0);
425 if (PA_UNLIKELY(frames
< 0)) {
427 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
433 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
434 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
436 if (u
->memchunk
.length
<= 0) {
437 pa_memblock_unref(u
->memchunk
.memblock
);
438 pa_memchunk_reset(&u
->memchunk
);
443 u
->frame_index
+= frames
;
444 u
->since_start
+= (size_t) frames
* u
->frame_size
;
446 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
455 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
459 static void update_smoother(struct userdata
*u
) {
460 snd_pcm_sframes_t delay
= 0;
463 pa_usec_t now1
, now2
;
464 /* struct timeval timestamp; */
465 snd_pcm_status_t
*status
;
467 snd_pcm_status_alloca(&status
);
470 pa_assert(u
->pcm_handle
);
472 /* Let's update the time smoother */
474 snd_pcm_hwsync(u
->pcm_handle
);
475 snd_pcm_avail_update(u
->pcm_handle
);
477 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
478 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
482 /* delay = snd_pcm_status_get_delay(status); */
484 if (PA_UNLIKELY((err
= snd_pcm_delay(u
->pcm_handle
, &delay
)) < 0)) {
485 pa_log("Failed to query DSP status data: %s", snd_strerror(err
));
489 frames
= u
->frame_index
- delay
;
491 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
493 /* snd_pcm_status_get_tstamp(status, ×tamp); */
494 /* pa_rtclock_from_wallclock(×tamp); */
495 /* now1 = pa_timeval_load(×tamp); */
497 now1
= pa_rtclock_usec();
498 now2
= pa_bytes_to_usec((uint64_t) frames
* u
->frame_size
, &u
->sink
->sample_spec
);
499 pa_smoother_put(u
->smoother
, now1
, now2
);
502 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
505 pa_usec_t now1
, now2
;
509 now1
= pa_rtclock_usec();
510 now2
= pa_smoother_get(u
->smoother
, now1
);
512 delay
= (int64_t) pa_bytes_to_usec((uint64_t) u
->frame_index
* u
->frame_size
, &u
->sink
->sample_spec
) - (int64_t) now2
;
515 r
= (pa_usec_t
) delay
;
517 if (u
->memchunk
.memblock
)
518 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
523 static int build_pollfd(struct userdata
*u
) {
525 pa_assert(u
->pcm_handle
);
527 if (u
->alsa_rtpoll_item
)
528 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
530 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
536 static int suspend(struct userdata
*u
) {
538 pa_assert(u
->pcm_handle
);
540 pa_smoother_pause(u
->smoother
, pa_rtclock_usec());
542 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
543 * take awfully long with our long buffer sizes today. */
544 snd_pcm_close(u
->pcm_handle
);
545 u
->pcm_handle
= NULL
;
547 if (u
->alsa_rtpoll_item
) {
548 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
549 u
->alsa_rtpoll_item
= NULL
;
552 pa_log_info("Device suspended...");
557 static int update_sw_params(struct userdata
*u
) {
558 snd_pcm_uframes_t avail_min
;
563 /* Use the full buffer if noone asked us for anything specific */
564 u
->hwbuf_unused_frames
= 0;
569 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
572 pa_log_debug("latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
574 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
576 /* We need at least one sample in our buffer */
578 if (PA_UNLIKELY(b
< u
->frame_size
))
581 u
->hwbuf_unused_frames
= (snd_pcm_sframes_t
)
582 (PA_LIKELY(b
< u
->hwbuf_size
) ?
583 ((u
->hwbuf_size
- b
) / u
->frame_size
) : 0);
585 fix_tsched_watermark(u
);
589 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u
->hwbuf_unused_frames
);
591 /* We need at last one frame in the used part of the buffer */
592 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused_frames
+ 1;
595 pa_usec_t sleep_usec
, process_usec
;
597 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
598 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
);
601 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
603 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
604 pa_log("Failed to set software parameters: %s", snd_strerror(err
));
608 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
- (size_t) u
->hwbuf_unused_frames
* u
->frame_size
);
613 static int unsuspend(struct userdata
*u
) {
618 snd_pcm_uframes_t period_size
;
621 pa_assert(!u
->pcm_handle
);
623 pa_log_info("Trying resume...");
625 snd_config_update_free_global();
626 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
627 /*SND_PCM_NONBLOCK|*/
628 SND_PCM_NO_AUTO_RESAMPLE
|
629 SND_PCM_NO_AUTO_CHANNELS
|
630 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
631 pa_log("Error opening PCM device %s: %s", u
->device_name
, snd_strerror(err
));
635 ss
= u
->sink
->sample_spec
;
636 nfrags
= u
->nfragments
;
637 period_size
= u
->fragment_size
/ u
->frame_size
;
641 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
642 pa_log("Failed to set hardware parameters: %s", snd_strerror(err
));
646 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
647 pa_log_warn("Resume failed, couldn't get original access mode.");
651 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
652 pa_log_warn("Resume failed, couldn't restore original sample settings.");
656 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
657 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
658 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
659 (unsigned long) nfrags
, period_size
* u
->frame_size
);
663 if (update_sw_params(u
) < 0)
666 if (build_pollfd(u
) < 0)
669 /* FIXME: We need to reload the volume somehow */
674 pa_log_info("Resumed successfully...");
680 snd_pcm_close(u
->pcm_handle
);
681 u
->pcm_handle
= NULL
;
687 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
688 struct userdata
*u
= PA_SINK(o
)->userdata
;
692 case PA_SINK_MESSAGE_GET_LATENCY
: {
696 r
= sink_get_latency(u
);
698 *((pa_usec_t
*) data
) = r
;
703 case PA_SINK_MESSAGE_SET_STATE
:
705 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
707 case PA_SINK_SUSPENDED
:
708 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
716 case PA_SINK_RUNNING
:
718 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
719 if (build_pollfd(u
) < 0)
723 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
724 if (unsuspend(u
) < 0)
730 case PA_SINK_UNLINKED
:
738 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
741 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
742 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
745 pa_assert(u
->mixer_handle
);
747 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
750 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
751 pa_sink_get_volume(u
->sink
, TRUE
);
752 pa_sink_get_mute(u
->sink
, TRUE
);
758 static pa_volume_t
from_alsa_volume(struct userdata
*u
, long alsa_vol
) {
760 return (pa_volume_t
) round(((double) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) /
761 (double) (u
->hw_volume_max
- u
->hw_volume_min
));
764 static long to_alsa_volume(struct userdata
*u
, pa_volume_t vol
) {
767 alsa_vol
= (long) round(((double) vol
* (double) (u
->hw_volume_max
- u
->hw_volume_min
))
768 / PA_VOLUME_NORM
) + u
->hw_volume_min
;
770 return PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_volume_min
, u
->hw_volume_max
);
773 static int sink_get_volume_cb(pa_sink
*s
) {
774 struct userdata
*u
= s
->userdata
;
778 char t
[PA_CVOLUME_SNPRINT_MAX
];
781 pa_assert(u
->mixer_elem
);
783 if (u
->mixer_seperate_channels
) {
785 r
.channels
= s
->sample_spec
.channels
;
787 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
790 if (u
->hw_dB_supported
) {
792 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
795 #ifdef HAVE_VALGRIND_MEMCHECK_H
796 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
799 r
.values
[i
] = pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0);
802 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
805 r
.values
[i
] = from_alsa_volume(u
, alsa_vol
);
812 if (u
->hw_dB_supported
) {
814 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
817 #ifdef HAVE_VALGRIND_MEMCHECK_H
818 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
821 pa_cvolume_set(&r
, s
->sample_spec
.channels
, pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0));
825 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
828 pa_cvolume_set(&r
, s
->sample_spec
.channels
, from_alsa_volume(u
, alsa_vol
));
832 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
834 if (!pa_cvolume_equal(&u
->hardware_volume
, &r
)) {
836 u
->hardware_volume
= s
->volume
= r
;
838 if (u
->hw_dB_supported
) {
841 /* Hmm, so the hardware volume changed, let's reset our software volume */
843 pa_cvolume_reset(&reset
, s
->sample_spec
.channels
);
844 pa_sink_set_soft_volume(s
, &reset
);
851 pa_log_error("Unable to read volume: %s", snd_strerror(err
));
856 static int sink_set_volume_cb(pa_sink
*s
) {
857 struct userdata
*u
= s
->userdata
;
863 pa_assert(u
->mixer_elem
);
865 if (u
->mixer_seperate_channels
) {
867 r
.channels
= s
->sample_spec
.channels
;
869 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
873 vol
= s
->volume
.values
[i
];
875 if (u
->hw_dB_supported
) {
877 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
878 alsa_vol
+= u
->hw_dB_max
;
879 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
881 if ((err
= snd_mixer_selem_set_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
, 1)) < 0)
884 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
887 r
.values
[i
] = pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0);
890 alsa_vol
= to_alsa_volume(u
, vol
);
892 if ((err
= snd_mixer_selem_set_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
)) < 0)
895 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
898 r
.values
[i
] = from_alsa_volume(u
, alsa_vol
);
906 vol
= pa_cvolume_max(&s
->volume
);
908 if (u
->hw_dB_supported
) {
909 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
910 alsa_vol
+= u
->hw_dB_max
;
911 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
913 if ((err
= snd_mixer_selem_set_playback_dB_all(u
->mixer_elem
, alsa_vol
, 1)) < 0)
916 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
919 pa_cvolume_set(&r
, s
->volume
.channels
, pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0));
922 alsa_vol
= to_alsa_volume(u
, vol
);
924 if ((err
= snd_mixer_selem_set_playback_volume_all(u
->mixer_elem
, alsa_vol
)) < 0)
927 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
930 pa_cvolume_set(&r
, s
->sample_spec
.channels
, from_alsa_volume(u
, alsa_vol
));
934 u
->hardware_volume
= r
;
936 if (u
->hw_dB_supported
) {
937 char t
[PA_CVOLUME_SNPRINT_MAX
];
939 /* Match exactly what the user requested by software */
941 pa_sw_cvolume_divide(&r
, &s
->volume
, &r
);
942 pa_sink_set_soft_volume(s
, &r
);
944 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->volume
));
945 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
946 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
950 /* We can't match exactly what the user requested, hence let's
951 * at least tell the user about it */
958 pa_log_error("Unable to set volume: %s", snd_strerror(err
));
963 static int sink_get_mute_cb(pa_sink
*s
) {
964 struct userdata
*u
= s
->userdata
;
968 pa_assert(u
->mixer_elem
);
970 if ((err
= snd_mixer_selem_get_playback_switch(u
->mixer_elem
, 0, &sw
)) < 0) {
971 pa_log_error("Unable to get switch: %s", snd_strerror(err
));
980 static int sink_set_mute_cb(pa_sink
*s
) {
981 struct userdata
*u
= s
->userdata
;
985 pa_assert(u
->mixer_elem
);
987 if ((err
= snd_mixer_selem_set_playback_switch_all(u
->mixer_elem
, !s
->muted
)) < 0) {
988 pa_log_error("Unable to set switch: %s", snd_strerror(err
));
995 static void sink_update_requested_latency_cb(pa_sink
*s
) {
996 struct userdata
*u
= s
->userdata
;
997 snd_pcm_sframes_t before
;
1003 before
= u
->hwbuf_unused_frames
;
1004 update_sw_params(u
);
1006 /* Let's check whether we now use only a smaller part of the
1007 buffer then before. If so, we need to make sure that subsequent
1008 rewinds are relative to the new maxium fill level and not to the
1009 current fill level. Thus, let's do a full rewind once, to clear
1012 if (u
->hwbuf_unused_frames
> before
) {
1013 pa_log_debug("Requesting rewind due to latency change.");
1014 pa_sink_request_rewind(s
, (size_t) -1);
1018 static int process_rewind(struct userdata
*u
) {
1019 snd_pcm_sframes_t unused
;
1020 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1023 /* Figure out how much we shall rewind and reset the counter */
1024 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1025 u
->sink
->thread_info
.rewind_nbytes
= 0;
1027 if (rewind_nbytes
<= 0)
1030 pa_assert(rewind_nbytes
> 0);
1031 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1033 snd_pcm_hwsync(u
->pcm_handle
);
1034 if ((unused
= snd_pcm_avail_update(u
->pcm_handle
)) < 0) {
1035 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused
));
1039 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1041 if (u
->hwbuf_size
> unused_nbytes
)
1042 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1046 if (rewind_nbytes
> limit_nbytes
)
1047 rewind_nbytes
= limit_nbytes
;
1049 if (rewind_nbytes
> 0) {
1050 snd_pcm_sframes_t in_frames
, out_frames
;
1052 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1054 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1055 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1056 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1057 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames
));
1060 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1062 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1064 if (rewind_nbytes
<= 0)
1065 pa_log_info("Tried rewind, but was apparently not possible.");
1067 u
->frame_index
-= out_frames
;
1068 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1069 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1071 u
->after_rewind
= TRUE
;
1075 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1079 pa_sink_process_rewind(u
->sink
, 0);
1085 static void thread_func(void *userdata
) {
1086 struct userdata
*u
= userdata
;
1090 pa_log_debug("Thread starting up");
1092 if (u
->core
->realtime_scheduling
)
1093 pa_make_realtime(u
->core
->realtime_priority
);
1095 pa_thread_mq_install(&u
->thread_mq
);
1096 pa_rtpoll_install(u
->rtpoll
);
1101 /* pa_log_debug("loop"); */
1103 /* Render some data and write it to the dsp */
1104 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1106 pa_usec_t sleep_usec
= 0;
1108 if (u
->sink
->thread_info
.rewind_requested
)
1109 if (process_rewind(u
) < 0)
1113 work_done
= mmap_write(u
, &sleep_usec
);
1115 work_done
= unix_write(u
, &sleep_usec
);
1120 /* pa_log_debug("work_done = %i", work_done); */
1125 pa_log_info("Starting playback.");
1126 snd_pcm_start(u
->pcm_handle
);
1128 pa_smoother_resume(u
->smoother
, pa_rtclock_usec());
1134 if (u
->use_tsched
) {
1137 if (u
->since_start
<= u
->hwbuf_size
) {
1139 /* USB devices on ALSA seem to hit a buffer
1140 * underrun during the first iterations much
1141 * quicker then we calculate here, probably due to
1142 * the transport latency. To accomodate for that
1143 * we artificially decrease the sleep time until
1144 * we have filled the buffer at least once
1147 /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
1151 /* OK, the playback buffer is now full, let's
1152 * calculate when to wake up next */
1153 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1155 /* Convert from the sound card time domain to the
1156 * system time domain */
1157 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_usec(), sleep_usec
);
1159 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1161 /* We don't trust the conversion, so we wake up whatever comes first */
1162 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1166 u
->after_rewind
= FALSE
;
1168 } else if (u
->use_tsched
)
1170 /* OK, we're in an invalid state, let's disable our timers */
1171 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1173 /* Hmm, nothing to do. Let's sleep */
1174 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1180 /* Tell ALSA about this and process its response */
1181 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1182 struct pollfd
*pollfd
;
1183 unsigned short revents
= 0;
1187 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1189 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1190 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err
));
1194 if (revents
& (POLLERR
|POLLNVAL
|POLLHUP
|POLLPRI
)) {
1195 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1202 if (revents
&& u
->use_tsched
)
1203 pa_log_debug("Wakeup from ALSA!%s%s", (revents
& POLLIN
) ? " INPUT" : "", (revents
& POLLOUT
) ? " OUTPUT" : "");
1208 /* If this was no regular exit from the loop we have to continue
1209 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1210 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1211 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1214 pa_log_debug("Thread shutting down");
1217 int pa__init(pa_module
*m
) {
1219 pa_modargs
*ma
= NULL
;
1220 struct userdata
*u
= NULL
;
1224 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1225 snd_pcm_uframes_t period_frames
, tsched_frames
;
1227 snd_pcm_info_t
*pcm_info
= NULL
;
1230 char *name_buf
= NULL
;
1231 pa_bool_t namereg_fail
;
1232 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
;
1234 pa_sink_new_data data
;
1236 snd_pcm_info_alloca(&pcm_info
);
1240 pa_alsa_redirect_errors_inc();
1242 if (!(ma
= pa_modargs_new(m
->argument
, valid_modargs
))) {
1243 pa_log("Failed to parse module arguments");
1247 ss
= m
->core
->default_sample_spec
;
1248 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1249 pa_log("Failed to parse sample specification and channel map");
1253 frame_size
= pa_frame_size(&ss
);
1255 nfrags
= m
->core
->default_n_fragments
;
1256 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1258 frag_size
= (uint32_t) frame_size
;
1259 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1260 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1262 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1263 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1264 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1265 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1266 pa_log("Failed to parse buffer metrics");
1270 hwbuf_size
= frag_size
* nfrags
;
1271 period_frames
= frag_size
/frame_size
;
1272 tsched_frames
= tsched_size
/frame_size
;
1274 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1275 pa_log("Failed to parse mmap argument.");
1279 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1280 pa_log("Failed to parse tsched argument.");
1284 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1285 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1289 u
= pa_xnew0(struct userdata
, 1);
1293 u
->use_mmap
= use_mmap
;
1294 u
->use_tsched
= use_tsched
;
1297 u
->after_rewind
= FALSE
;
1298 u
->rtpoll
= pa_rtpoll_new();
1299 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1300 u
->alsa_rtpoll_item
= NULL
;
1302 u
->smoother
= pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC
*2, DEFAULT_TSCHED_BUFFER_USEC
*2, TRUE
, 5);
1303 usec
= pa_rtclock_usec();
1304 pa_smoother_set_time_offset(u
->smoother
, usec
);
1305 pa_smoother_pause(u
->smoother
, usec
);
1307 snd_config_update_free_global();
1312 if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1314 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id(
1318 SND_PCM_STREAM_PLAYBACK
,
1319 &nfrags
, &period_frames
, tsched_frames
,
1326 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1327 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1330 SND_PCM_STREAM_PLAYBACK
,
1331 &nfrags
, &period_frames
, tsched_frames
,
1337 pa_assert(u
->device_name
);
1338 pa_log_info("Successfully opened device %s.", u
->device_name
);
1340 if (use_mmap
&& !b
) {
1341 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1342 u
->use_mmap
= use_mmap
= FALSE
;
1345 if (use_tsched
&& (!b
|| !d
)) {
1346 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1347 u
->use_tsched
= use_tsched
= FALSE
;
1351 pa_log_info("Successfully enabled mmap() mode.");
1354 pa_log_info("Successfully enabled timer-based scheduling mode.");
1356 if ((err
= snd_pcm_info(u
->pcm_handle
, pcm_info
)) < 0) {
1357 pa_log("Error fetching PCM info: %s", snd_strerror(err
));
1361 /* ALSA might tweak the sample spec, so recalculate the frame size */
1362 frame_size
= pa_frame_size(&ss
);
1364 if ((err
= snd_mixer_open(&u
->mixer_handle
, 0)) < 0)
1365 pa_log_warn("Error opening mixer: %s", snd_strerror(err
));
1367 pa_bool_t found
= FALSE
;
1369 if (pa_alsa_prepare_mixer(u
->mixer_handle
, u
->device_name
) >= 0)
1372 snd_pcm_info_t
*info
;
1374 snd_pcm_info_alloca(&info
);
1376 if (snd_pcm_info(u
->pcm_handle
, info
) >= 0) {
1380 if ((card
= snd_pcm_info_get_card(info
)) >= 0) {
1382 md
= pa_sprintf_malloc("hw:%i", card
);
1384 if (strcmp(u
->device_name
, md
))
1385 if (pa_alsa_prepare_mixer(u
->mixer_handle
, md
) >= 0)
1393 if (!(u
->mixer_elem
= pa_alsa_find_elem(u
->mixer_handle
, "Master", "PCM")))
1397 snd_mixer_close(u
->mixer_handle
);
1398 u
->mixer_handle
= NULL
;
1402 if ((name
= pa_modargs_get_value(ma
, "sink_name", NULL
)))
1403 namereg_fail
= TRUE
;
1405 name
= name_buf
= pa_sprintf_malloc("alsa_output.%s", u
->device_name
);
1406 namereg_fail
= FALSE
;
1409 pa_sink_new_data_init(&data
);
1410 data
.driver
= __FILE__
;
1412 pa_sink_new_data_set_name(&data
, name
);
1413 data
.namereg_fail
= namereg_fail
;
1414 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1415 pa_sink_new_data_set_channel_map(&data
, &map
);
1417 pa_alsa_init_proplist(data
.proplist
, pcm_info
);
1418 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1419 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1420 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1421 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1423 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
);
1424 pa_sink_new_data_done(&data
);
1428 pa_log("Failed to create sink object");
1432 u
->sink
->parent
.process_msg
= sink_process_msg
;
1433 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1434 u
->sink
->userdata
= u
;
1436 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1437 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1439 u
->frame_size
= frame_size
;
1440 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1441 u
->nfragments
= nfrags
;
1442 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1443 u
->hwbuf_unused_frames
= 0;
1444 u
->tsched_watermark
= tsched_watermark
;
1446 u
->hw_dB_supported
= FALSE
;
1447 u
->hw_dB_min
= u
->hw_dB_max
= 0;
1448 u
->hw_volume_min
= u
->hw_volume_max
= 0;
1449 u
->mixer_seperate_channels
= FALSE
;
1450 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1453 fix_tsched_watermark(u
);
1455 u
->sink
->thread_info
.max_rewind
= use_tsched
? u
->hwbuf_size
: 0;
1456 u
->sink
->thread_info
.max_request
= u
->hwbuf_size
;
1458 pa_sink_set_latency_range(u
->sink
,
1459 !use_tsched
? pa_bytes_to_usec(u
->hwbuf_size
, &ss
) : (pa_usec_t
) -1,
1460 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1462 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1463 nfrags
, (long unsigned) u
->fragment_size
,
1464 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1467 pa_log_info("Time scheduling watermark is %0.2fms",
1468 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1470 if (update_sw_params(u
) < 0)
1473 pa_memchunk_reset(&u
->memchunk
);
1475 if (u
->mixer_handle
) {
1476 pa_assert(u
->mixer_elem
);
1478 if (snd_mixer_selem_has_playback_volume(u
->mixer_elem
)) {
1479 pa_bool_t suitable
= FALSE
;
1481 if (snd_mixer_selem_get_playback_volume_range(u
->mixer_elem
, &u
->hw_volume_min
, &u
->hw_volume_max
) < 0)
1482 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1483 else if (u
->hw_volume_min
>= u
->hw_volume_max
)
1484 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u
->hw_volume_min
, u
->hw_volume_max
);
1486 pa_log_info("Volume ranges from %li to %li.", u
->hw_volume_min
, u
->hw_volume_max
);
1490 if (snd_mixer_selem_get_playback_dB_range(u
->mixer_elem
, &u
->hw_dB_min
, &u
->hw_dB_max
) < 0)
1491 pa_log_info("Mixer doesn't support dB information.");
1493 #ifdef HAVE_VALGRIND_MEMCHECK_H
1494 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_min
, sizeof(u
->hw_dB_min
));
1495 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_max
, sizeof(u
->hw_dB_max
));
1498 if (u
->hw_dB_min
>= u
->hw_dB_max
)
1499 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u
->hw_dB_min
/100.0, (double) u
->hw_dB_max
/100.0);
1501 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u
->hw_dB_min
/100.0, (double) u
->hw_dB_max
/100.0);
1502 u
->hw_dB_supported
= TRUE
;
1507 !u
->hw_dB_supported
&&
1508 u
->hw_volume_max
- u
->hw_volume_min
< 3) {
1510 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1515 u
->mixer_seperate_channels
= pa_alsa_calc_mixer_map(u
->mixer_elem
, &map
, u
->mixer_map
, TRUE
) >= 0;
1517 u
->sink
->get_volume
= sink_get_volume_cb
;
1518 u
->sink
->set_volume
= sink_set_volume_cb
;
1519 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->hw_dB_supported
? PA_SINK_DECIBEL_VOLUME
: 0);
1520 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->hw_dB_supported
? "supported" : "not supported");
1523 pa_log_info("Using software volume control.");
1526 if (snd_mixer_selem_has_playback_switch(u
->mixer_elem
)) {
1527 u
->sink
->get_mute
= sink_get_mute_cb
;
1528 u
->sink
->set_mute
= sink_set_mute_cb
;
1529 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1531 pa_log_info("Using software mute control.");
1533 u
->mixer_fdl
= pa_alsa_fdlist_new();
1535 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, m
->core
->mainloop
) < 0) {
1536 pa_log("Failed to initialize file descriptor monitoring");
1540 snd_mixer_elem_set_callback(u
->mixer_elem
, mixer_callback
);
1541 snd_mixer_elem_set_callback_private(u
->mixer_elem
, u
);
1543 u
->mixer_fdl
= NULL
;
1545 pa_alsa_dump(u
->pcm_handle
);
1547 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1548 pa_log("Failed to create thread.");
1552 /* Get initial mixer settings */
1553 if (data
.volume_is_set
) {
1554 if (u
->sink
->set_volume
)
1555 u
->sink
->set_volume(u
->sink
);
1557 if (u
->sink
->get_volume
)
1558 u
->sink
->get_volume(u
->sink
);
1561 if (data
.muted_is_set
) {
1562 if (u
->sink
->set_mute
)
1563 u
->sink
->set_mute(u
->sink
);
1565 if (u
->sink
->get_mute
)
1566 u
->sink
->get_mute(u
->sink
);
1569 pa_sink_put(u
->sink
);
1571 pa_modargs_free(ma
);
1578 pa_modargs_free(ma
);
/* Module teardown: unlink the sink, stop the I/O thread, then release
 * ALSA and PulseAudio resources roughly in reverse order of setup.
 * NOTE(review): this is an incomplete extract — several guard lines,
 * closing braces, and the trailing free of `u` itself lie outside this
 * view; comments below are hedged where the elided control flow matters. */
1585 void pa__done(pa_module
*m
) {
/* If init never completed, no userdata was attached: just drop the
 * ALSA error-redirection refcount and (presumably, in elided lines)
 * return early — TODO confirm against the full file. */
1590 if (!(u
= m
->userdata
)) {
1591 pa_alsa_redirect_errors_dec();
/* Detach the sink from the core so no new audio is routed to it while
 * we tear down (an `if (u->sink)` guard appears to be elided here). */
1596 pa_sink_unlink(u
->sink
);
/* Synchronously ask the I/O thread to shut down, then reap it
 * (an `if (u->thread)` guard appears to be elided here). */
1599 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1600 pa_thread_free(u
->thread
);
/* Dispose of the thread message queues now that the thread is gone. */
1603 pa_thread_mq_done(&u
->thread_mq
);
/* Drop our reference on the (already unlinked) sink object. */
1606 pa_sink_unref(u
->sink
);
/* Release any partially-rendered audio still held in the memchunk. */
1608 if (u
->memchunk
.memblock
)
1609 pa_memblock_unref(u
->memchunk
.memblock
);
/* Free the rtpoll item for the ALSA fds, then the rtpoll itself. */
1611 if (u
->alsa_rtpoll_item
)
1612 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1615 pa_rtpoll_free(u
->rtpoll
);
/* Stop watching the mixer's file descriptors on the main loop. */
1618 pa_alsa_fdlist_free(u
->mixer_fdl
);
/* Close the ALSA mixer handle, if one was successfully opened. */
1620 if (u
->mixer_handle
)
1621 snd_mixer_close(u
->mixer_handle
);
/* Discard any frames still queued in the hardware buffer, then close
 * the PCM device. */
1623 if (u
->pcm_handle
) {
1624 snd_pcm_drop(u
->pcm_handle
);
1625 snd_pcm_close(u
->pcm_handle
);
/* Free the latency smoother allocated during init. */
1629 pa_smoother_free(u
->smoother
);
/* device_name was allocated when the device was opened. */
1631 pa_xfree(u
->device_name
);
/* Release ALSA's global configuration cache (balances the parse done
 * at open time). */
1634 snd_config_update_free_global();
/* Balances the pa_alsa_redirect_errors_inc() done in pa__init.
 * NOTE(review): the final pa_xfree(u) and closing brace are beyond
 * this extract. */
1636 pa_alsa_redirect_errors_dec();