4 This file is part of PulseAudio.
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as published
11 by the Free Software Foundation; either version 2 of the License,
12 or (at your option) any later version.
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU Lesser General Public License
20 along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <asoundlib.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/util.h>
35 #include <pulse/timeval.h>
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/core-error.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/rtclock.h>
51 #include <pulsecore/time-smoother.h>
53 #include "alsa-util.h"
54 #include "module-alsa-sink-symdef.h"
56 PA_MODULE_AUTHOR("Lennart Poettering");
57 PA_MODULE_DESCRIPTION("ALSA Sink");
58 PA_MODULE_VERSION(PACKAGE_VERSION
);
59 PA_MODULE_LOAD_ONCE(FALSE
);
61 "sink_name=<name for the sink> "
62 "device=<ALSA device> "
63 "device_id=<ALSA card index> "
64 "format=<sample format> "
66 "channels=<number of channels> "
67 "channel_map=<channel map> "
68 "fragments=<number of fragments> "
69 "fragment_size=<fragment size> "
70 "mmap=<enable memory mapping?> "
71 "tsched=<enable system timer based scheduling mode?> "
72 "tsched_buffer_size=<buffer size when using timer based scheduling> "
73 "tsched_buffer_watermark=<lower fill watermark> "
74 "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
76 static const char* const valid_modargs
[] = {
89 "tsched_buffer_watermark",
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (5*PA_USEC_PER_SEC) /* 5s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
106 pa_thread_mq thread_mq
;
109 snd_pcm_t
*pcm_handle
;
111 pa_alsa_fdlist
*mixer_fdl
;
112 snd_mixer_t
*mixer_handle
;
113 snd_mixer_elem_t
*mixer_elem
;
114 long hw_volume_max
, hw_volume_min
;
115 long hw_dB_max
, hw_dB_min
;
116 pa_bool_t hw_dB_supported
;
118 size_t frame_size
, fragment_size
, hwbuf_size
, tsched_watermark
;
120 pa_memchunk memchunk
;
124 pa_bool_t use_mmap
, use_tsched
;
126 pa_bool_t first
, after_rewind
;
128 pa_rtpoll_item
*alsa_rtpoll_item
;
130 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
132 pa_smoother
*smoother
;
134 uint64_t since_start
;
136 snd_pcm_sframes_t hwbuf_unused_frames
;
/* Clamp u->tsched_watermark so it fits into the usable part of the hw
 * buffer: never larger than max_use - min_sleep, never smaller than the
 * minimum wakeup span, where min_sleep/min_wakeup are frame-aligned and
 * at least one frame each.
 *
 * NOTE(review): extracted source elides several lines here (the
 * declaration of max_use, asserts, blank lines and the closing brace);
 * only the visible statements are reproduced below — verify against the
 * upstream file. */
static void fix_tsched_watermark(struct userdata *u) {
    size_t min_sleep, min_wakeup;

    /* Bytes of the hw buffer actually in use (unused frames excluded). */
    max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;

    min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);

    /* Keep both bounds within half the usable buffer, frame-aligned. */
    if (min_sleep > max_use/2)
        min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_sleep < u->frame_size)
        min_sleep = u->frame_size;

    if (min_wakeup > max_use/2)
        min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_wakeup < u->frame_size)
        min_wakeup = u->frame_size;

    /* Finally clamp the watermark into [min_wakeup, max_use - min_sleep]. */
    if (u->tsched_watermark > max_use-min_sleep)
        u->tsched_watermark = max_use-min_sleep;

    if (u->tsched_watermark < min_wakeup)
        u->tsched_watermark = min_wakeup;
/* Compute how long the IO thread may sleep (*sleep_usec) and how much
 * time to reserve for processing (*process_usec), based on the requested
 * sink latency (or the full hw buffer time if none was requested) and the
 * wakeup watermark.
 *
 * NOTE(review): the extraction elides the declarations of usec/wm and the
 * `if (usec >= wm) { ... } else` branch that normally precedes the final
 * assignment — as shown, the two assignments to *sleep_usec would simply
 * overwrite each other. Verify against upstream before relying on this. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_assert(sleep_usec);
    pa_assert(process_usec);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* (pa_usec_t) -1 means "no specific latency requested". */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    *sleep_usec = usec - wm;

    *process_usec = *sleep_usec = usec / 2;

    /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
/* Attempt to recover the PCM handle from an error returned by an ALSA
 * call (`call` is a human-readable name used only for logging). Returns 0
 * if snd_pcm_recover() succeeded.
 *
 * NOTE(review): asserts, the -EPIPE check guarding the underrun message,
 * and the function's return paths are elided in this extraction. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_log_debug("%s: %s", call, snd_strerror(err));

    /* -EAGAIN is not an error condition and must not reach this path. */
    pa_assert(err != -EAGAIN);

    pa_log_debug("%s: Buffer underrun!", call);

    /* Let ALSA try to recover silently (third arg = 1). */
    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {

    pa_log("%s: %s", call, snd_strerror(err));
/* Given the number of available frames n (from snd_pcm_avail_update()),
 * compute how many bytes are still queued for playback. On an underrun
 * (nothing left, not first iteration, not right after a rewind) double
 * the tsched watermark and re-clamp it.
 *
 * NOTE(review): the declaration/initialization of left_to_play, the
 * return statement and some braces are elided in this extraction. */
static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
    if (n*u->frame_size < u->hwbuf_size)
        left_to_play = u->hwbuf_size - (n*u->frame_size);

    if (left_to_play > 0) {
        /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
    } else if (!u->first && !u->after_rewind) {
        pa_log_info("Underrun!");

        size_t old_watermark = u->tsched_watermark;

        /* Grow the watermark so future wakeups happen earlier. */
        u->tsched_watermark *= 2;
        fix_tsched_watermark(u);

        if (old_watermark != u->tsched_watermark)
            pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                          (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
/* Fill the ALSA hw buffer via the mmap interface: render sink audio
 * directly into the memory-mapped DMA area. On success *sleep_usec is set
 * to how long the caller may sleep before the next refill.
 *
 * NOTE(review): this extraction elides several declarations (n, r, err,
 * left_to_play, p, chunk, work_done), the surrounding for(;;) loop,
 * return statements and various closing braces — only the visible
 * statements are reproduced. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
    pa_usec_t max_sleep_usec, process_usec;

    pa_sink_assert_ref(u->sink);

    hw_sleep_time(u, &max_sleep_usec, &process_usec);

    snd_pcm_hwsync(u->pcm_handle);

    /* First we determine how many samples are missing to fill the
     * buffer up to 100% */

    if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
        if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)

    left_to_play = check_left_to_play(u, n);

    /* We won't fill up the playback buffer before at least
     * half the sleep time is over because otherwise we might
     * ask for more data from the clients then they expect. We
     * need to guarantee that clients only have to keep around
     * a single hw buffer length. */

    if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > max_sleep_usec/2)

    if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))

    /* Exclude the part of the buffer we deliberately leave unused. */
    n -= u->hwbuf_unused_frames;

    /* pa_log_debug("Filling up"); */

        const snd_pcm_channel_area_t *areas;
        snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;

        /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

        if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
            if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)

        /* Make sure that if these memblocks need to be copied they will fit into one slot */
        if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
            frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

        /* Check these are multiples of 8 bit */
        pa_assert((areas[0].first & 7) == 0);
        pa_assert((areas[0].step & 7) == 0);

        /* We assume a single interleaved memory buffer */
        pa_assert((areas[0].first >> 3) == 0);
        pa_assert((areas[0].step >> 3) == u->frame_size);

        p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

        /* Wrap the DMA area into a fixed memblock so the sink can render
         * straight into it without an extra copy. */
        chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
        chunk.length = pa_memblock_get_length(chunk.memblock);

        pa_sink_render_into_full(u->sink, &chunk);

        /* FIXME: Maybe we can do something to keep this memory block
         * a little bit longer around? */
        pa_memblock_unref_fixed(chunk.memblock);

        if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
            if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)

        u->frame_index += frames;
        u->since_start += frames * u->frame_size;

        /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

        if (frames >= (snd_pcm_uframes_t) n)

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
/* Fill the ALSA hw buffer via the classic read/write interface
 * (snd_pcm_writei). On success *sleep_usec is set to how long the caller
 * may sleep before the next refill.
 *
 * NOTE(review): declarations (n, r, left_to_play, p, work_done), the
 * surrounding for(;;) loop, returns and some braces are elided in this
 * extraction. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
    pa_usec_t max_sleep_usec, process_usec;

    pa_sink_assert_ref(u->sink);

    hw_sleep_time(u, &max_sleep_usec, &process_usec);

    snd_pcm_hwsync(u->pcm_handle);

    if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
        if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)

    left_to_play = check_left_to_play(u, n);

    /* We won't fill up the playback buffer before at least
     * half the sleep time is over because otherwise we might
     * ask for more data from the clients then they expect. We
     * need to guarantee that clients only have to keep around
     * a single hw buffer length. */

    if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > max_sleep_usec/2)

    if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))

    n -= u->hwbuf_unused_frames;

        snd_pcm_sframes_t frames;

        /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

        /* Render new data only once the pending chunk is exhausted. */
        if (u->memchunk.length <= 0)
            pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);

        pa_assert(u->memchunk.length > 0);

        frames = u->memchunk.length / u->frame_size;

        p = pa_memblock_acquire(u->memchunk.memblock);
        frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
        pa_memblock_release(u->memchunk.memblock);

        pa_assert(frames != 0);

        if (PA_UNLIKELY(frames < 0)) {

            /* NOTE(review): this passes `n` rather than the failing
             * `frames` value as the error code to try_recover() — looks
             * like a bug (upstream later changed it to frames); verify. */
            if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)

        /* Advance the chunk past what was actually written. */
        u->memchunk.index += frames * u->frame_size;
        u->memchunk.length -= frames * u->frame_size;

        if (u->memchunk.length <= 0) {
            pa_memblock_unref(u->memchunk.memblock);
            pa_memchunk_reset(&u->memchunk);

        u->frame_index += frames;
        u->since_start += frames * u->frame_size;

        /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
/* Feed the time smoother a fresh (system time, playback time) sample pair
 * derived from snd_pcm_delay(), so sink_get_latency() can interpolate.
 *
 * NOTE(review): declarations of err and frames, and some braces/returns,
 * are elided in this extraction. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));

    /* Frames actually played so far = frames handed to ALSA minus what is
     * still queued in the hw buffer. */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
/* Estimate the current playback latency: bytes handed to ALSA minus the
 * smoother's estimate of what has been played, plus whatever is still
 * buffered in u->memchunk.
 *
 * NOTE(review): declarations of delay and r, clamping of negative delay,
 * and the return statement are elided in this extraction. */
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t now1, now2;

    now1 = pa_rtclock_usec();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;

    r = (pa_usec_t) delay;

    /* Data already rendered but not yet written also counts as latency. */
    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
/* (Re)build the rtpoll item carrying the PCM handle's poll descriptors,
 * freeing any previous one first.
 *
 * NOTE(review): the error/return paths and closing brace are elided in
 * this extraction. */
static int build_pollfd(struct userdata *u) {
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
/* Suspend the device: pause the smoother, drain and close the PCM handle
 * and drop the associated rtpoll item.
 *
 * NOTE(review): the return statement and some braces are elided in this
 * extraction. */
static int suspend(struct userdata *u) {
    pa_assert(u->pcm_handle);

    /* Freeze latency interpolation while the device is closed. */
    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    snd_pcm_drain(u->pcm_handle);
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;

    pa_log_info("Device suspended...");
/* Recompute hwbuf_unused_frames from the currently requested latency and
 * push a matching avail_min to ALSA via pa_alsa_set_sw_params().
 *
 * NOTE(review): declarations of latency, b and err, the tsched-only
 * guard around the sleep-time addition, returns and some braces are
 * elided in this extraction. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {

        pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);

        b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

        /* We need at least one sample in our buffer */

        if (PA_UNLIKELY(b < u->frame_size))

        /* Everything beyond the requested latency stays unused. */
        u->hwbuf_unused_frames =
            PA_LIKELY(b < u->hwbuf_size) ?
            ((u->hwbuf_size - b) / u->frame_size) : 0;

        fix_tsched_watermark(u);

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at least one frame in the used part of the buffer */
    avail_min = u->hwbuf_unused_frames + 1;

        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
/* Resume a suspended device: reopen the PCM, restore hw/sw parameters and
 * the pollfd, and verify the device came back with the same access mode,
 * sample spec and fragment layout as before. The trailing close/NULL pair
 * is the error-unwind path.
 *
 * NOTE(review): declarations (err, ss, nfrags, b, d), return statements,
 * goto labels and various braces are elided in this extraction. */
static int unsuspend(struct userdata *u) {
    snd_pcm_uframes_t period_size;

    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));

    /* Re-apply the hw parameters we were running with before suspend. */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");

    if (update_sw_params(u) < 0)

    if (build_pollfd(u) < 0)

    /* FIXME: We need to reload the volume somehow */

    pa_log_info("Resumed successfully...");

    /* Error-unwind: drop the half-opened handle again. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;
/* Message handler for the sink object, run in the IO thread: reports the
 * current latency and performs state transitions (suspend on SUSPENDED,
 * pollfd build / unsuspend on RUNNING), then delegates to the generic
 * pa_sink_process_msg().
 *
 * NOTE(review): the enclosing switch statement, suspend() call, break
 * statements and many braces are elided in this extraction. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

        case PA_SINK_MESSAGE_GET_LATENCY: {

            r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    /* Can only suspend an open sink. */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                case PA_SINK_RUNNING:

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)

                case PA_SINK_UNLINKED:

    return pa_sink_process_msg(o, code, data, offset, chunk);
/* ALSA mixer element callback: on external value changes, refresh the
 * sink's cached volume and mute state; element-removal events are passed
 * through (the elided branch returns early).
 *
 * NOTE(review): the return statements and closing braces are elided in
 * this extraction. */
static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink);
        pa_sink_get_mute(u->sink);
/* Read the per-channel hardware volume into s->volume, preferring the dB
 * API; if the dB query fails, permanently fall back to raw volume steps
 * mapped linearly into [0, PA_VOLUME_NORM].
 *
 * NOTE(review): declarations (i, err, alsa_vol), goto fail / return
 * paths and several braces are elided in this extraction. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
                /* ALSA reports dB in 1/100 dB units. */
                s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

            /* dB query failed once: stop trying the dB API for good. */
            u->hw_dB_supported = FALSE;

        if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)

        s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));

    pa_log_error("Unable to read volume: %s", snd_strerror(err));
/* Write s->volume to the hardware per channel, clamped to the device
 * range; prefers the dB API and reads the value back so s->volume
 * reflects what the hardware actually accepted. Falls back permanently to
 * raw volume steps if the dB call fails.
 *
 * NOTE(review): declarations (i, err, vol, alsa_vol), goto fail / return
 * paths and several braces are elided in this extraction. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        /* Hardware cannot amplify; cap at PA_VOLUME_NORM. */
        vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);

        if (u->hw_dB_supported) {

            /* Convert to ALSA's 1/100 dB units and clamp to the range. */
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {

                /* Read back what the hardware actually applied. */
                if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
                    s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

            u->hw_dB_supported = FALSE;

        alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

        if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)

        if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
            s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));

    pa_log_error("Unable to set volume: %s", snd_strerror(err));
/* Query the hardware playback switch (channel 0) to refresh the sink's
 * mute state.
 *
 * NOTE(review): the declaration of err/sw, the assignment of the result
 * to s->muted and the return paths are elided in this extraction. */
static int sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
        pa_log_error("Unable to get switch: %s", snd_strerror(err));
/* Apply the sink's mute state to the hardware: the playback switch is
 * "on" (1) when NOT muted, hence the negation.
 *
 * NOTE(review): the declaration of err and the return paths are elided
 * in this extraction. */
static int sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
        pa_log_error("Unable to set switch: %s", snd_strerror(err));
/* Called when the requested latency changes: recompute the unused-buffer
 * accounting and, if the usable buffer shrank, request a full rewind so
 * subsequent rewinds are relative to the new maximum fill level.
 *
 * NOTE(review): the call that actually updates u->hwbuf_unused_frames
 * (update_sw_params) between the snapshot and the comparison is elided
 * in this extraction — verify against upstream. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    snd_pcm_sframes_t before;

    before = u->hwbuf_unused_frames;

    /* Let's check whether we now use only a smaller part of the
       buffer then before. If so, we need to make sure that subsequent
       rewinds are relative to the new maxium fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear */

    if (u->hwbuf_unused_frames > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, 0);
/* Handle a pending rewind request: limit the requested byte count to what
 * can safely be rewound (keeping the watermark plus already-consumed
 * space untouched), perform snd_pcm_rewind(), and inform the sink of the
 * amount actually rewound.
 *
 * NOTE(review): the else-branch assignments (limit_nbytes when the buffer
 * is fully unused), return statements and some braces are elided in this
 * extraction. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));

    /* Bytes we must not touch: the watermark plus what is already free. */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may rewind less than asked for. */
        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");

        u->frame_index -= out_frames;
        pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
        pa_sink_process_rewind(u->sink, rewind_nbytes);

        u->after_rewind = TRUE;

    pa_log_debug("Mhmm, actually there is nothing to rewind.");
/* Main IO-thread loop: process rewinds, write audio (mmap or unix path),
 * start playback and program the wakeup timer in tsched mode, then run
 * the rtpoll and react to ALSA poll events. On abnormal exit it asks the
 * core to unload the module and drains messages until shutdown.
 *
 * NOTE(review): the enclosing for(;;) loop, many declarations (work_done,
 * ret, err, n, cusec, sleep_usec halving, u->first handling), the goto
 * fail/finish labels and numerous braces are elided in this extraction. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {

            pa_usec_t sleep_usec;

            if (u->sink->thread_info.rewind_nbytes > 0)
                if (process_rewind(u) < 0)

            work_done = mmap_write(u, &sleep_usec);

            work_done = unix_write(u, &sleep_usec);

            /* pa_log_debug("work_done = %i", work_done); */

                pa_log_info("Starting playback.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_usec());

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once */

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/

                    /* OK, the playback buffer is now full, let's
                     * calculate when to wake up next */
                    /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                    /* Convert from the sound card time domain to the
                     * system time domain */
                    cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                    /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                    /* We don't trust the conversion, so we wake up whatever comes first */
                    pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));

            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));

            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);

    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

    pa_log_debug("Thread shutting down");
1079 int pa__init(pa_module
*m
) {
1081 pa_modargs
*ma
= NULL
;
1082 struct userdata
*u
= NULL
;
1086 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1087 snd_pcm_uframes_t period_frames
, tsched_frames
;
1089 snd_pcm_info_t
*pcm_info
= NULL
;
1092 char *name_buf
= NULL
;
1093 pa_bool_t namereg_fail
;
1094 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, mixer_reset
= TRUE
;
1096 pa_sink_new_data data
;
1098 snd_pcm_info_alloca(&pcm_info
);
1102 pa_alsa_redirect_errors_inc();
1104 if (!(ma
= pa_modargs_new(m
->argument
, valid_modargs
))) {
1105 pa_log("Failed to parse module arguments");
1109 ss
= m
->core
->default_sample_spec
;
1110 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1111 pa_log("Failed to parse sample specification and channel map");
1115 frame_size
= pa_frame_size(&ss
);
1117 nfrags
= m
->core
->default_n_fragments
;
1118 frag_size
= pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1120 frag_size
= frame_size
;
1121 tsched_size
= pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1122 tsched_watermark
= pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1124 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1125 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1126 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1127 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1128 pa_log("Failed to parse buffer metrics");
1132 hwbuf_size
= frag_size
* nfrags
;
1133 period_frames
= frag_size
/frame_size
;
1134 tsched_frames
= tsched_size
/frame_size
;
1136 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1137 pa_log("Failed to parse mmap argument.");
1141 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1142 pa_log("Failed to parse timer_scheduling argument.");
1146 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1147 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1151 if (pa_modargs_get_value_boolean(ma
, "mixer_reset", &mixer_reset
) < 0) {
1152 pa_log("Failed to parse mixer_reset argument.");
1156 u
= pa_xnew0(struct userdata
, 1);
1160 u
->use_mmap
= use_mmap
;
1161 u
->use_tsched
= use_tsched
;
1164 u
->after_rewind
= FALSE
;
1165 u
->rtpoll
= pa_rtpoll_new();
1166 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1167 u
->alsa_rtpoll_item
= NULL
;
1169 u
->smoother
= pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC
*2, DEFAULT_TSCHED_BUFFER_USEC
*2, TRUE
, 5);
1170 usec
= pa_rtclock_usec();
1171 pa_smoother_set_time_offset(u
->smoother
, usec
);
1172 pa_smoother_pause(u
->smoother
, usec
);
1174 snd_config_update_free_global();
1179 if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1181 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id(
1185 SND_PCM_STREAM_PLAYBACK
,
1186 &nfrags
, &period_frames
, tsched_frames
,
1193 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1194 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1197 SND_PCM_STREAM_PLAYBACK
,
1198 &nfrags
, &period_frames
, tsched_frames
,
1204 pa_assert(u
->device_name
);
1205 pa_log_info("Successfully opened device %s.", u
->device_name
);
1207 if (use_mmap
&& !b
) {
1208 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1209 u
->use_mmap
= use_mmap
= FALSE
;
1212 if (use_tsched
&& (!b
|| !d
)) {
1213 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1214 u
->use_tsched
= use_tsched
= FALSE
;
1218 pa_log_info("Successfully enabled mmap() mode.");
1221 pa_log_info("Successfully enabled timer-based scheduling mode.");
1223 if ((err
= snd_pcm_info(u
->pcm_handle
, pcm_info
)) < 0) {
1224 pa_log("Error fetching PCM info: %s", snd_strerror(err
));
1228 /* ALSA might tweak the sample spec, so recalculate the frame size */
1229 frame_size
= pa_frame_size(&ss
);
1231 if ((err
= snd_mixer_open(&u
->mixer_handle
, 0)) < 0)
1232 pa_log_warn("Error opening mixer: %s", snd_strerror(err
));
1234 pa_bool_t found
= FALSE
;
1236 if (pa_alsa_prepare_mixer(u
->mixer_handle
, u
->device_name
) >= 0)
1239 snd_pcm_info_t
*info
;
1241 snd_pcm_info_alloca(&info
);
1243 if (snd_pcm_info(u
->pcm_handle
, info
) >= 0) {
1247 if ((card
= snd_pcm_info_get_card(info
)) >= 0) {
1249 md
= pa_sprintf_malloc("hw:%i", card
);
1251 if (strcmp(u
->device_name
, md
))
1252 if (pa_alsa_prepare_mixer(u
->mixer_handle
, md
) >= 0)
1260 if (!(u
->mixer_elem
= pa_alsa_find_elem(u
->mixer_handle
, "Master", "PCM")))
1264 snd_mixer_close(u
->mixer_handle
);
1265 u
->mixer_handle
= NULL
;
1269 if ((name
= pa_modargs_get_value(ma
, "sink_name", NULL
)))
1270 namereg_fail
= TRUE
;
1272 name
= name_buf
= pa_sprintf_malloc("alsa_output.%s", u
->device_name
);
1273 namereg_fail
= FALSE
;
1276 pa_sink_new_data_init(&data
);
1277 data
.driver
= __FILE__
;
1279 pa_sink_new_data_set_name(&data
, name
);
1280 data
.namereg_fail
= namereg_fail
;
1281 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1282 pa_sink_new_data_set_channel_map(&data
, &map
);
1284 pa_alsa_init_proplist(data
.proplist
, pcm_info
);
1285 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1286 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1287 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1288 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1290 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
);
1291 pa_sink_new_data_done(&data
);
1295 pa_log("Failed to create sink object");
1299 u
->sink
->parent
.process_msg
= sink_process_msg
;
1300 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1301 u
->sink
->userdata
= u
;
1303 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1304 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1306 u
->frame_size
= frame_size
;
1307 u
->fragment_size
= frag_size
= period_frames
* frame_size
;
1308 u
->nfragments
= nfrags
;
1309 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1310 u
->hwbuf_unused_frames
= 0;
1311 u
->tsched_watermark
= tsched_watermark
;
1313 u
->hw_dB_supported
= FALSE
;
1314 u
->hw_dB_min
= u
->hw_dB_max
= 0;
1315 u
->hw_volume_min
= u
->hw_volume_max
= 0;
1318 fix_tsched_watermark(u
);
1320 u
->sink
->thread_info
.max_rewind
= use_tsched
? u
->hwbuf_size
: 0;
1321 u
->sink
->max_latency
= pa_bytes_to_usec(u
->hwbuf_size
, &ss
);
1323 u
->sink
->min_latency
= u
->sink
->max_latency
;
1325 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1326 nfrags
, (long unsigned) u
->fragment_size
,
1327 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1330 pa_log_info("Time scheduling watermark is %0.2fms",
1331 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1333 if (update_sw_params(u
) < 0)
1336 pa_memchunk_reset(&u
->memchunk
);
1338 if (u
->mixer_handle
) {
1339 pa_assert(u
->mixer_elem
);
1341 if (snd_mixer_selem_has_playback_volume(u
->mixer_elem
))
1343 if (pa_alsa_calc_mixer_map(u
->mixer_elem
, &map
, u
->mixer_map
, TRUE
) >= 0 &&
1344 snd_mixer_selem_get_playback_volume_range(u
->mixer_elem
, &u
->hw_volume_min
, &u
->hw_volume_max
) >= 0) {
1346 pa_bool_t suitable
= TRUE
;
1348 pa_log_info("Volume ranges from %li to %li.", u
->hw_volume_min
, u
->hw_volume_max
);
1350 if (u
->hw_volume_min
> u
->hw_volume_max
) {
1352 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u
->hw_volume_min
, u
->hw_volume_max
);
1355 } else if (u
->hw_volume_max
- u
->hw_volume_min
< 3) {
1357 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1360 } else if (snd_mixer_selem_get_playback_dB_range(u
->mixer_elem
, &u
->hw_dB_min
, &u
->hw_dB_max
) >= 0) {
1362 /* u->hw_dB_max = 0; u->hw_dB_min = -3000; Use this to make valgrind shut up */
1364 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u
->hw_dB_min
/100.0, u
->hw_dB_max
/100.0);
1366 /* Let's see if this thing actually is useful for muting */
1367 if (u
->hw_dB_min
> -6000) {
1368 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u
->hw_dB_min
) / 100);
1371 } else if (u
->hw_dB_max
< 0) {
1373 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u
->hw_dB_max
) / 100);
1376 } else if (u
->hw_dB_min
>= u
->hw_dB_max
) {
1378 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u
->hw_dB_min
) / 100, ((double) u
->hw_dB_max
) / 100);
1383 if (u
->hw_dB_max
> 0) {
1384 /* dB > 0 means overamplification, and clipping, we don't want that here */
1385 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u
->hw_dB_max
) / 100);
1389 u
->hw_dB_supported
= TRUE
;
1394 u
->sink
->get_volume
= sink_get_volume_cb
;
1395 u
->sink
->set_volume
= sink_set_volume_cb
;
1396 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->hw_dB_supported
? PA_SINK_DECIBEL_VOLUME
: 0);
1397 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->hw_dB_supported
? "supported" : "not supported");
1399 } else if (mixer_reset
) {
1400 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1401 pa_alsa_0dB_playback(u
->mixer_elem
);
1403 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1406 if (snd_mixer_selem_has_playback_switch(u
->mixer_elem
)) {
1407 u
->sink
->get_mute
= sink_get_mute_cb
;
1408 u
->sink
->set_mute
= sink_set_mute_cb
;
1409 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1412 u
->mixer_fdl
= pa_alsa_fdlist_new();
1414 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, m
->core
->mainloop
) < 0) {
1415 pa_log("Failed to initialize file descriptor monitoring");
1419 snd_mixer_elem_set_callback(u
->mixer_elem
, mixer_callback
);
1420 snd_mixer_elem_set_callback_private(u
->mixer_elem
, u
);
1422 u
->mixer_fdl
= NULL
;
1424 pa_alsa_dump(u
->pcm_handle
);
1426 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1427 pa_log("Failed to create thread.");
1431 /* Get initial mixer settings */
1432 if (data
.volume_is_set
) {
1433 if (u
->sink
->set_volume
)
1434 u
->sink
->set_volume(u
->sink
);
1436 if (u
->sink
->get_volume
)
1437 u
->sink
->get_volume(u
->sink
);
1440 if (data
.muted_is_set
) {
1441 if (u
->sink
->set_mute
)
1442 u
->sink
->set_mute(u
->sink
);
1444 if (u
->sink
->get_mute
)
1445 u
->sink
->get_mute(u
->sink
);
1448 pa_sink_put(u
->sink
);
1450 pa_modargs_free(ma
);
1457 pa_modargs_free(ma
);
1464 void pa__done(pa_module
*m
) {
1469 if (!(u
= m
->userdata
)) {
1470 pa_alsa_redirect_errors_dec();
1475 pa_sink_unlink(u
->sink
);
1478 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1479 pa_thread_free(u
->thread
);
1482 pa_thread_mq_done(&u
->thread_mq
);
1485 pa_sink_unref(u
->sink
);
1487 if (u
->memchunk
.memblock
)
1488 pa_memblock_unref(u
->memchunk
.memblock
);
1490 if (u
->alsa_rtpoll_item
)
1491 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1494 pa_rtpoll_free(u
->rtpoll
);
1497 pa_alsa_fdlist_free(u
->mixer_fdl
);
1499 if (u
->mixer_handle
)
1500 snd_mixer_close(u
->mixer_handle
);
1502 if (u
->pcm_handle
) {
1503 snd_pcm_drop(u
->pcm_handle
);
1504 snd_pcm_close(u
->pcm_handle
);
1508 pa_smoother_free(u
->smoother
);
1510 pa_xfree(u
->device_name
);
1513 snd_config_update_free_global();
1515 pa_alsa_redirect_errors_dec();