4 This file is part of PulseAudio.
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as published
11 by the Free Software Foundation; either version 2 of the License,
12 or (at your option) any later version.
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 You should have received a copy of the GNU Lesser General Public License
20 along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <asoundlib.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/util.h>
35 #include <pulse/timeval.h>
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/core-error.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/rtclock.h>
51 #include <pulsecore/time-smoother.h>
53 #include "alsa-util.h"
54 #include "module-alsa-sink-symdef.h"
/* Module metadata: author, description, version, and the flag that makes
 * the core load this module at most once per argument set. */
56 PA_MODULE_AUTHOR("Lennart Poettering");
57 PA_MODULE_DESCRIPTION("ALSA Sink");
58 PA_MODULE_VERSION(PACKAGE_VERSION
);
59 PA_MODULE_LOAD_ONCE(FALSE
);
61 "sink_name=<name for the sink> "
62 "device=<ALSA device> "
63 "device_id=<ALSA card index> "
64 "format=<sample format> "
66 "channels=<number of channels> "
67 "channel_map=<channel map> "
68 "fragments=<number of fragments> "
69 "fragment_size=<fragment size> "
70 "mmap=<enable memory mapping?> "
71 "tsched=<enable system timer based scheduling mode?> "
72 "tsched_buffer_size=<buffer size when using timer based scheduling> "
73 "tsched_buffer_watermark=<lower fill watermark> "
74 "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
76 static const char* const valid_modargs
[] = {
89 "tsched_buffer_watermark",
/* Tuning defaults: fallback ALSA device name, buffer/watermark sizes for
 * timer-based scheduling, and the minimum sleep/wakeup margins consumed
 * by fix_tsched_watermark() below. */
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (5*PA_USEC_PER_SEC) /* 5s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
106 pa_thread_mq thread_mq
;
109 snd_pcm_t
*pcm_handle
;
111 pa_alsa_fdlist
*mixer_fdl
;
112 snd_mixer_t
*mixer_handle
;
113 snd_mixer_elem_t
*mixer_elem
;
114 long hw_volume_max
, hw_volume_min
;
115 long hw_dB_max
, hw_dB_min
;
116 pa_bool_t hw_dB_supported
;
118 size_t frame_size
, fragment_size
, hwbuf_size
, tsched_watermark
;
120 pa_memchunk memchunk
;
124 pa_bool_t use_mmap
, use_tsched
;
126 pa_bool_t first
, after_rewind
;
128 pa_rtpoll_item
*alsa_rtpoll_item
;
130 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
132 pa_smoother
*smoother
;
135 snd_pcm_sframes_t hwbuf_unused_frames
;
/* Clamp u->tsched_watermark into the usable range of the hardware buffer.
 * "max_use" is the part of the hw buffer we actually use (total size minus
 * the frames deliberately left unused); the watermark is then bounded so
 * that at least TSCHED_MIN_SLEEP_USEC worth of sleeping and
 * TSCHED_MIN_WAKEUP_USEC worth of wakeup margin remain, never going below
 * one frame.
 * NOTE(review): this chunk is lossy — the declaration of max_use and some
 * assertions/braces from the original are not visible here. */
138 static void fix_tsched_watermark(struct userdata
*u
) {
140 size_t min_sleep
, min_wakeup
;
143 max_use
= u
->hwbuf_size
- u
->hwbuf_unused_frames
* u
->frame_size
;
/* Convert the minimum margins from time to bytes for this sample spec. */
145 min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
146 min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
/* Each margin may use at most half of the usable buffer, but at least one frame. */
148 if (min_sleep
> max_use
/2)
149 min_sleep
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
150 if (min_sleep
< u
->frame_size
)
151 min_sleep
= u
->frame_size
;
153 if (min_wakeup
> max_use
/2)
154 min_wakeup
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
155 if (min_wakeup
< u
->frame_size
)
156 min_wakeup
= u
->frame_size
;
/* Finally clamp the watermark between min_wakeup and max_use - min_sleep. */
158 if (u
->tsched_watermark
> max_use
-min_sleep
)
159 u
->tsched_watermark
= max_use
-min_sleep
;
161 if (u
->tsched_watermark
< min_wakeup
)
162 u
->tsched_watermark
= min_wakeup
;
/* Try to recover the PCM device after an error returned by an ALSA call.
 * "call" is the name of the failing function (for logging), "err" the
 * negative errno-style code. -EAGAIN is treated specially (no data ready),
 * and snd_pcm_recover() is used for the actual recovery (e.g. underruns).
 * NOTE(review): return statements and closing braces are missing from this
 * lossy chunk; the visible logic only shows the logging/recovery calls. */
165 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
170 pa_log_debug("%s: %s", call
, snd_strerror(err
));
172 if (err
== -EAGAIN
) {
173 pa_log_debug("%s: EAGAIN", call
);
178 pa_log_debug("%s: Buffer underrun!", call
);
/* Ask ALSA to recover; the third argument (1) silences ALSA's own messages. */
180 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) == 0) {
185 pa_log("%s: %s", call
, snd_strerror(err
));
/* Given "n" free frames reported by the device, compute how much audio is
 * still queued (left_to_play). If nothing is left we hit an underrun: log
 * it and double the timer-scheduling watermark (re-clamped via
 * fix_tsched_watermark()) so we wake up earlier next time. Skipped right
 * after startup or a rewind, where the numbers are not meaningful yet.
 * NOTE(review): the declaration of left_to_play and some braces are not
 * visible in this lossy chunk. */
189 static void check_left_to_play(struct userdata
*u
, snd_pcm_sframes_t n
) {
192 if (u
->first
|| u
->after_rewind
)
195 if (n
*u
->frame_size
< u
->hwbuf_size
)
196 left_to_play
= u
->hwbuf_size
- (n
*u
->frame_size
);
200 if (left_to_play
> 0)
201 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
203 pa_log_info("Underrun!");
/* Underrun path: grow the watermark so the next wakeup happens sooner. */
206 size_t old_watermark
= u
->tsched_watermark
;
208 u
->tsched_watermark
*= 2;
209 fix_tsched_watermark(u
);
211 if (old_watermark
!= u
->tsched_watermark
)
212 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
213 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
/* Fill the ALSA playback buffer using the mmap'ed transfer API:
 * query free space (snd_pcm_avail_update), map a region
 * (snd_pcm_mmap_begin), render the sink's audio directly into it via a
 * fixed memblock, then commit (snd_pcm_mmap_commit). ALSA errors go
 * through try_recover(). Presumably loops until the used part of the
 * buffer is full — the loop construct itself is not visible in this lossy
 * chunk (several declarations, returns and braces are missing). */
218 static int mmap_write(struct userdata
*u
) {
220 pa_bool_t checked_left_to_play
= FALSE
;
223 pa_sink_assert_ref(u
->sink
);
230 const snd_pcm_channel_area_t
*areas
;
231 snd_pcm_uframes_t offset
, frames
;
233 snd_pcm_hwsync(u
->pcm_handle
);
235 /* First we determine how many samples are missing to fill the
236 * buffer up to 100% */
238 if (PA_UNLIKELY((n
= snd_pcm_avail_update(u
->pcm_handle
)) < 0)) {
240 if ((r
= try_recover(u
, "snd_pcm_avail_update", n
)) == 0)
/* Underrun detection is done once per invocation, on the first fill level. */
248 if (!checked_left_to_play
) {
249 check_left_to_play(u
, n
);
250 checked_left_to_play
= TRUE
;
253 /* We only use part of the buffer that matches our
254 * dynamically requested latency */
256 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
))
259 frames
= n
= n
- u
->hwbuf_unused_frames
;
261 pa_log_debug("%lu frames to write", (unsigned long) frames
);
263 if (PA_UNLIKELY((err
= snd_pcm_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
)) < 0)) {
265 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
273 /* Make sure that if these memblocks need to be copied they will fit into one slot */
274 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
275 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
277 /* Check these are multiples of 8 bit */
278 pa_assert((areas
[0].first
& 7) == 0);
279 pa_assert((areas
[0].step
& 7)== 0);
281 /* We assume a single interleaved memory buffer */
282 pa_assert((areas
[0].first
>> 3) == 0);
283 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
/* Wrap the mapped DMA region in a fixed (non-owned) memblock and render into it. */
285 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
287 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
288 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
291 pa_sink_render_into_full(u
->sink
, &chunk
);
293 /* FIXME: Maybe we can do something to keep this memory block
294 * a little bit longer around? */
295 pa_memblock_unref_fixed(chunk
.memblock
);
297 if (PA_UNLIKELY((err
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
299 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", err
)) == 0)
/* Advance the write pointer used for latency calculation. */
309 u
->frame_index
+= frames
;
311 pa_log_debug("wrote %lu frames", (unsigned long) frames
);
/* Everything requested was written — presumably the loop exits here. */
313 if (PA_LIKELY(frames
>= (snd_pcm_uframes_t
) n
))
/* Fill the ALSA playback buffer using the classic read/write transfer API
 * (snd_pcm_writei): render into u->memchunk on demand, write from it, and
 * consume the chunk as frames are accepted by the device. ALSA errors go
 * through try_recover(). Presumably loops until the used part of the
 * buffer is full — the loop construct and several declarations/returns are
 * not visible in this lossy chunk.
 * NOTE(review): at original line 352, "frames" is logged before any visible
 * assignment to it — looks like it should log "n"; confirm against the
 * upstream file before relying on that debug output. */
318 static int unix_write(struct userdata
*u
) {
320 pa_bool_t checked_left_to_play
= FALSE
;
323 pa_sink_assert_ref(u
->sink
);
327 snd_pcm_sframes_t n
, frames
;
330 snd_pcm_hwsync(u
->pcm_handle
);
332 if (PA_UNLIKELY((n
= snd_pcm_avail_update(u
->pcm_handle
)) < 0)) {
334 if ((r
= try_recover(u
, "snd_pcm_avail_update", n
)) == 0)
342 if (!checked_left_to_play
) {
343 check_left_to_play(u
, n
);
344 checked_left_to_play
= TRUE
;
/* Only fill up to the dynamically requested latency, like mmap_write(). */
347 if (PA_UNLIKELY(n
<= u
->hwbuf_unused_frames
))
350 n
-= u
->hwbuf_unused_frames
;
352 pa_log_debug("%lu frames to write", (unsigned long) frames
);
/* Render new data only when the staging memchunk is empty. */
354 if (u
->memchunk
.length
<= 0)
355 pa_sink_render(u
->sink
, n
* u
->frame_size
, &u
->memchunk
);
357 pa_assert(u
->memchunk
.length
> 0);
359 frames
= u
->memchunk
.length
/ u
->frame_size
;
364 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
365 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, frames
);
366 pa_memblock_release(u
->memchunk
.memblock
);
368 pa_assert(frames
!= 0);
370 if (PA_UNLIKELY(frames
< 0)) {
372 if ((r
= try_recover(u
, "snd_pcm_writei", n
)) == 0)
/* Consume the accepted frames from the staging chunk. */
380 u
->memchunk
.index
+= frames
* u
->frame_size
;
381 u
->memchunk
.length
-= frames
* u
->frame_size
;
383 if (u
->memchunk
.length
<= 0) {
384 pa_memblock_unref(u
->memchunk
.memblock
);
385 pa_memchunk_reset(&u
->memchunk
);
390 u
->frame_index
+= frames
;
392 pa_log_debug("wrote %lu frames", (unsigned long) frames
);
394 if (PA_LIKELY(frames
>= n
))
/* Feed a new (system time, sound card time) sample pair into the time
 * smoother: query the current hardware delay with snd_pcm_delay(), derive
 * the card-side playback position as frame_index - delay, and pair it with
 * the current rtclock time. The commented-out code is an older
 * snd_pcm_status()-based variant kept for reference. */
399 static void update_smoother(struct userdata
*u
) {
400 snd_pcm_sframes_t delay
= 0;
403 pa_usec_t now1
, now2
;
404 /* struct timeval timestamp; */
405 snd_pcm_status_t
*status
;
407 snd_pcm_status_alloca(&status
);
410 pa_assert(u
->pcm_handle
);
412 /* Let's update the time smoother */
414 snd_pcm_hwsync(u
->pcm_handle
);
415 snd_pcm_avail_update(u
->pcm_handle
);
417 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
418 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
422 /* delay = snd_pcm_status_get_delay(status); */
424 if (PA_UNLIKELY((err
= snd_pcm_delay(u
->pcm_handle
, &delay
)) < 0)) {
425 pa_log("Failed to query DSP status data: %s", snd_strerror(err
));
/* Card-side position = frames we handed to ALSA minus what is still queued. */
429 frames
= u
->frame_index
- delay
;
431 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
433 /* snd_pcm_status_get_tstamp(status, &timestamp); */
434 /* pa_rtclock_from_wallclock(&timestamp); */
435 /* now1 = pa_timeval_load(&timestamp); */
437 now1
= pa_rtclock_usec();
438 now2
= pa_bytes_to_usec(frames
* u
->frame_size
, &u
->sink
->sample_spec
);
439 pa_smoother_put(u
->smoother
, now1
, now2
);
/* Estimate the current sink latency: total audio handed to ALSA
 * (frame_index, in time units) minus the smoothed card-side playback
 * position, plus whatever is still waiting in the staging memchunk.
 * NOTE(review): the declarations of delay/r and the return statement are
 * not visible in this lossy chunk. */
442 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
445 pa_usec_t now1
, now2
;
449 now1
= pa_rtclock_usec();
450 now2
= pa_smoother_get(u
->smoother
, now1
);
452 delay
= (int64_t) pa_bytes_to_usec(u
->frame_index
* u
->frame_size
, &u
->sink
->sample_spec
) - (int64_t) now2
;
455 r
= (pa_usec_t
) delay
;
/* Data rendered but not yet written to the device also counts as latency. */
457 if (u
->memchunk
.memblock
)
458 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
/* (Re)create the rtpoll item carrying the PCM device's poll descriptors,
 * freeing any previous one first. Returns non-zero on failure (the error
 * path is not visible in this lossy chunk). */
463 static int build_pollfd(struct userdata
*u
) {
465 pa_assert(u
->pcm_handle
);
467 if (u
->alsa_rtpoll_item
)
468 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
470 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
/* Suspend the sink: pause the time smoother, drain and close the PCM
 * device, and drop the associated rtpoll item. After this u->pcm_handle
 * is NULL until unsuspend() reopens it. */
476 static int suspend(struct userdata
*u
) {
478 pa_assert(u
->pcm_handle
);
480 pa_smoother_pause(u
->smoother
, pa_rtclock_usec());
/* Let queued audio finish, then release the device. */
483 snd_pcm_drain(u
->pcm_handle
);
484 snd_pcm_close(u
->pcm_handle
);
485 u
->pcm_handle
= NULL
;
487 if (u
->alsa_rtpoll_item
) {
488 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
489 u
->alsa_rtpoll_item
= NULL
;
492 pa_log_info("Device suspended...");
/* Compute how long the timer-scheduling loop should sleep: start from the
 * requested sink latency (or the full hw buffer time if nobody asked for
 * anything specific) and subtract the wakeup watermark "wm". The
 * watermark subtraction and the return statement are not visible in this
 * lossy chunk; the "first iteration" branch apparently halves the time. */
497 static pa_usec_t
hw_sleep_time(struct userdata
*u
) {
502 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
504 if (usec
== (pa_usec_t
) -1)
505 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
507 pa_log_debug("hw buffer time: %u ms", (unsigned) (usec
/ PA_USEC_PER_MSEC
));
509 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
517 pa_log_debug("Decreasing wakeup time for the first iteration by half.");
521 pa_log_debug("after watermark: %u ms", (unsigned) (usec
/ PA_USEC_PER_MSEC
));
/* Push updated software parameters to ALSA: derive hwbuf_unused_frames
 * from the currently requested latency (0 if none requested), re-clamp
 * the watermark, and set avail_min so the device wakes us at the right
 * fill level (in timer mode, padded by the sleep time).
 * NOTE(review): declarations of latency/b/usec/err and several braces and
 * returns are not visible in this lossy chunk. */
526 static int update_sw_params(struct userdata
*u
) {
527 snd_pcm_uframes_t avail_min
;
532 /* Use the full buffer if no one asked us for anything specific */
533 u
->hwbuf_unused_frames
= 0;
538 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
541 pa_log_debug("latency set to %0.2f", (double) latency
/ PA_USEC_PER_MSEC
);
543 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
545 /* We need at least one sample in our buffer */
547 if (PA_UNLIKELY(b
< u
->frame_size
))
/* Everything beyond the requested latency is deliberately left unused. */
550 u
->hwbuf_unused_frames
=
551 PA_LIKELY(b
< u
->hwbuf_size
) ?
552 ((u
->hwbuf_size
- b
) / u
->frame_size
) : 0;
554 fix_tsched_watermark(u
);
558 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u
->hwbuf_unused_frames
);
560 /* We need at least one frame in the used part of the buffer */
561 avail_min
= u
->hwbuf_unused_frames
+ 1;
566 usec
= hw_sleep_time(u
);
567 avail_min
+= pa_usec_to_bytes(usec
, &u
->sink
->sample_spec
);
570 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
572 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
573 pa_log("Failed to set software parameters: %s", snd_strerror(err
));
/* Resume after suspend(): reopen the PCM device non-blocking, reapply the
 * original hw params (sample spec, fragment count/size, mmap/tsched mode)
 * and fail the resume if any of them cannot be restored exactly, then
 * refresh sw params and the pollfd item. On failure the device is closed
 * again (visible at the bottom — presumably a "fail:" label; labels,
 * returns and some declarations are missing from this lossy chunk). */
580 static int unsuspend(struct userdata
*u
) {
585 snd_pcm_uframes_t period_size
;
588 pa_assert(!u
->pcm_handle
);
590 pa_log_info("Trying resume...");
592 snd_config_update_free_global();
593 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
, SND_PCM_NONBLOCK
)) < 0) {
594 pa_log("Error opening PCM device %s: %s", u
->device_name
, snd_strerror(err
));
/* Re-request the exact configuration we had before suspending. */
598 ss
= u
->sink
->sample_spec
;
599 nfrags
= u
->nfragments
;
600 period_size
= u
->fragment_size
/ u
->frame_size
;
604 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
605 pa_log("Failed to set hardware parameters: %s", snd_strerror(err
));
/* Any deviation from the pre-suspend configuration aborts the resume. */
609 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
610 pa_log_warn("Resume failed, couldn't get original access mode.");
614 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
615 pa_log_warn("Resume failed, couldn't restore original sample settings.");
619 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
620 pa_log_warn("Resume failed, couldn't restore original fragment settings.");
624 if (update_sw_params(u
) < 0)
627 if (build_pollfd(u
) < 0)
630 /* FIXME: We need to reload the volume somehow */
634 pa_log_info("Resumed successfully...");
/* Failure path: release the half-opened device again. */
640 snd_pcm_close(u
->pcm_handle
);
641 u
->pcm_handle
= NULL
;
/* Message handler run in the IO thread: answers GET_LATENCY via
 * sink_get_latency() and handles state transitions — suspend on
 * PA_SINK_SUSPENDED, build the pollfd on first start (INIT) and
 * unsuspend() when leaving SUSPENDED. Everything else is delegated to
 * pa_sink_process_msg(). Several case bodies, braces and returns are not
 * visible in this lossy chunk. */
647 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
648 struct userdata
*u
= PA_SINK(o
)->userdata
;
652 case PA_SINK_MESSAGE_GET_LATENCY
: {
656 r
= sink_get_latency(u
);
658 *((pa_usec_t
*) data
) = r
;
663 case PA_SINK_MESSAGE_SET_STATE
:
665 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
667 case PA_SINK_SUSPENDED
:
668 pa_assert(PA_SINK_OPENED(u
->sink
->thread_info
.state
));
/* PA_SINK_IDLE presumably falls through to RUNNING here — confirm upstream. */
676 case PA_SINK_RUNNING
:
678 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
679 if (build_pollfd(u
) < 0)
683 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
684 if (unsuspend(u
) < 0)
690 case PA_SINK_UNLINKED
:
698 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
/* ALSA mixer element callback (main-loop context via the fdlist): on a
 * value change, re-read volume and mute state so external mixer changes
 * are reflected in the sink; element removal is handled separately. */
701 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
702 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
705 pa_assert(u
->mixer_handle
);
707 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
710 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
711 pa_sink_get_volume(u
->sink
);
712 pa_sink_get_mute(u
->sink
);
/* Read the per-channel hardware volume into s->volume. Prefers the dB API
 * (converted via pa_sw_volume_from_dB); if a dB read fails at runtime,
 * hw_dB_supported is cleared and the raw volume range is mapped linearly
 * onto [0, PA_VOLUME_NORM] instead. Error/return lines and the loop
 * closing brace are not visible in this lossy chunk. */
718 static int sink_get_volume_cb(pa_sink
*s
) {
719 struct userdata
*u
= s
->userdata
;
724 pa_assert(u
->mixer_elem
);
726 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
729 pa_assert(snd_mixer_selem_has_playback_channel(u
->mixer_elem
, u
->mixer_map
[i
]));
731 if (u
->hw_dB_supported
) {
/* ALSA reports dB in 1/100 dB units, hence the /100.0. */
733 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) >= 0) {
734 s
->volume
.values
[i
] = pa_sw_volume_from_dB(alsa_vol
/ 100.0);
/* dB query failed — downgrade to the raw-volume code path for good. */
738 u
->hw_dB_supported
= FALSE
;
741 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
744 s
->volume
.values
[i
] = (pa_volume_t
) roundf(((float) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) / (u
->hw_volume_max
- u
->hw_volume_min
));
750 pa_log_error("Unable to read volume: %s", snd_strerror(err
));
/* Write s->volume to the hardware, channel by channel. Volumes are capped
 * at PA_VOLUME_NORM (no hardware overamplification). The dB path converts
 * via pa_sw_volume_to_dB (×100 for ALSA's 1/100 dB units), clamps to the
 * device range, then reads the value back so s->volume reflects what the
 * hardware actually accepted; the raw path does the same with a linear
 * mapping. A failing dB write clears hw_dB_supported. Error/return lines
 * and the loop closing brace are not visible in this lossy chunk. */
755 static int sink_set_volume_cb(pa_sink
*s
) {
756 struct userdata
*u
= s
->userdata
;
761 pa_assert(u
->mixer_elem
);
763 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
767 pa_assert(snd_mixer_selem_has_playback_channel(u
->mixer_elem
, u
->mixer_map
[i
]));
769 vol
= PA_MIN(s
->volume
.values
[i
], PA_VOLUME_NORM
);
771 if (u
->hw_dB_supported
) {
772 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
773 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
775 if ((err
= snd_mixer_selem_set_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
, -1)) >= 0) {
/* Read back the value the hardware actually applied. */
777 if (snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
) >= 0)
778 s
->volume
.values
[i
] = pa_sw_volume_from_dB(alsa_vol
/ 100.0);
/* dB write failed — downgrade to the raw-volume code path for good. */
783 u
->hw_dB_supported
= FALSE
;
787 alsa_vol
= (long) roundf(((float) vol
* (u
->hw_volume_max
- u
->hw_volume_min
)) / PA_VOLUME_NORM
) + u
->hw_volume_min
;
788 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_volume_min
, u
->hw_volume_max
);
790 if ((err
= snd_mixer_selem_set_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
)) < 0)
793 if (snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
) >= 0)
794 s
->volume
.values
[i
] = (pa_volume_t
) roundf(((float) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) / (u
->hw_volume_max
- u
->hw_volume_min
));
800 pa_log_error("Unable to set volume: %s", snd_strerror(err
));
/* Read the hardware mute switch (channel 0) into the sink. The assignment
 * to s->muted and the returns are not visible in this lossy chunk;
 * presumably s->muted = !sw follows — confirm upstream. */
805 static int sink_get_mute_cb(pa_sink
*s
) {
806 struct userdata
*u
= s
->userdata
;
810 pa_assert(u
->mixer_elem
);
812 if ((err
= snd_mixer_selem_get_playback_switch(u
->mixer_elem
, 0, &sw
)) < 0) {
813 pa_log_error("Unable to get switch: %s", snd_strerror(err
));
/* Apply s->muted to the hardware: the ALSA playback switch is an "on"
 * flag, hence the negation. Return statements are not visible in this
 * lossy chunk. */
822 static int sink_set_mute_cb(pa_sink
*s
) {
823 struct userdata
*u
= s
->userdata
;
827 pa_assert(u
->mixer_elem
);
829 if ((err
= snd_mixer_selem_set_playback_switch_all(u
->mixer_elem
, !s
->muted
)) < 0) {
830 pa_log_error("Unable to set switch: %s", snd_strerror(err
));
/* Called when a client changes its requested latency; presumably forwards
 * to update_sw_params(u) — the body past the userdata fetch is not
 * visible in this lossy chunk. */
837 static void sink_update_requested_latency_cb(pa_sink
*s
) {
838 struct userdata
*u
= s
->userdata
;
/* Rewind the ALSA write pointer so already-queued audio can be replaced
 * (e.g. after a stream change). The request from the sink is limited so
 * we never rewind into the watermark/unplayed region, then executed with
 * snd_pcm_rewind(); the sink is told how much was actually rewound via
 * pa_sink_process_rewind(). Returns and some braces are not visible in
 * this lossy chunk. */
844 static int process_rewind(struct userdata
*u
) {
845 snd_pcm_sframes_t unused
;
846 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
/* Take and clear the pending rewind request. */
849 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
850 u
->sink
->thread_info
.rewind_nbytes
= 0;
852 pa_assert(rewind_nbytes
> 0);
853 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
855 snd_pcm_hwsync(u
->pcm_handle
);
856 if ((unused
= snd_pcm_avail_update(u
->pcm_handle
)) < 0) {
857 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused
));
/* Keep the watermark plus the already-free space out of the rewindable area. */
861 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
863 if (u
->hwbuf_size
> unused_nbytes
)
864 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
868 if (rewind_nbytes
> limit_nbytes
)
869 rewind_nbytes
= limit_nbytes
;
871 if (rewind_nbytes
> 0) {
872 snd_pcm_sframes_t in_frames
, out_frames
;
874 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
876 in_frames
= (snd_pcm_sframes_t
) rewind_nbytes
/ u
->frame_size
;
877 pa_log_debug("before: %lu", (unsigned long) in_frames
);
878 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, in_frames
)) < 0) {
879 pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames
));
882 pa_log_debug("after: %lu", (unsigned long) out_frames
);
/* The device may rewind less than asked; report the actual amount. */
884 rewind_nbytes
= out_frames
* u
->frame_size
;
886 if (rewind_nbytes
<= 0)
887 pa_log_info("Tried rewind, but was apparently not possible.");
889 u
->frame_index
-= out_frames
;
890 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
891 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
893 u
->after_rewind
= TRUE
;
896 pa_log_debug("Mhmm, actually there is nothing to rewind.");
/* IO-thread main loop: optionally switch to realtime scheduling, install
 * the thread message queue and rtpoll, then loop — process pending
 * rewinds, fill the device via mmap_write()/unix_write(), start playback
 * and arm the wakeup timer in tsched mode, run the poller, and translate
 * poll revents back to ALSA (recovering on POLLERR/POLLNVAL/POLLHUP).
 * On abnormal exit it asks the core to unload the module and drains
 * messages until PA_MESSAGE_SHUTDOWN. The loop construct itself and
 * several declarations/branches are not visible in this lossy chunk. */
901 static void thread_func(void *userdata
) {
902 struct userdata
*u
= userdata
;
906 pa_log_debug("Thread starting up");
908 if (u
->core
->realtime_scheduling
)
909 pa_make_realtime(u
->core
->realtime_priority
);
911 pa_thread_mq_install(&u
->thread_mq
);
912 pa_rtpoll_install(u
->rtpoll
);
917 /* pa_log_debug("loop"); */
919 /* Render some data and write it to the dsp */
920 if (PA_SINK_OPENED(u
->sink
->thread_info
.state
)) {
923 if (u
->sink
->thread_info
.rewind_nbytes
> 0)
924 if (process_rewind(u
) < 0)
/* Choose the transfer method picked at init time. */
928 work_done
= mmap_write(u
);
930 work_done
= unix_write(u
);
935 /* pa_log_debug("work_done = %i", work_done); */
940 pa_log_info("Starting playback.");
941 snd_pcm_start(u
->pcm_handle
);
943 pa_smoother_resume(u
->smoother
, pa_rtclock_usec());
950 pa_usec_t usec
, cusec
;
952 /* OK, the playback buffer is now full, let's
953 * calculate when to wake up next */
955 usec
= hw_sleep_time(u
);
957 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) usec / PA_USEC_PER_MSEC); */
959 /* Convert from the sound card time domain to the
960 * system time domain */
961 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_usec(), usec
);
963 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
965 /* We don't trust the conversion, so we wake up whatever comes first */
966 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(usec
, cusec
));
970 u
->after_rewind
= FALSE
;
972 } else if (u
->use_tsched
)
974 /* OK, we're in an invalid state, let's disable our timers */
975 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
977 /* Hmm, nothing to do. Let's sleep */
978 if ((ret
= pa_rtpoll_run(u
->rtpoll
, 1)) < 0)
984 /* Tell ALSA about this and process its response */
985 if (PA_SINK_OPENED(u
->sink
->thread_info
.state
)) {
986 struct pollfd
*pollfd
;
987 unsigned short revents
= 0;
991 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
993 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
994 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err
));
998 if (revents
& (POLLERR
|POLLNVAL
|POLLHUP
)) {
999 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1006 pa_log_debug("Wakeup from ALSA! (%i)", revents
);
1011 /* If this was no regular exit from the loop we have to continue
1012 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1013 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1014 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1017 pa_log_debug("Thread shutting down");
/* Module entry point: parse module arguments, open and configure the ALSA
 * PCM device (by device_id or device string), create and wire up the
 * pa_sink, probe the mixer for hardware volume/mute support, spawn the IO
 * thread, apply initial volume/mute, and publish the sink. Error paths
 * presumably jump to a "fail:" label (pa_modargs_free at the bottom);
 * labels, returns, several declarations and braces are not visible in
 * this lossy chunk. */
1020 int pa__init(pa_module
*m
) {
1022 pa_modargs
*ma
= NULL
;
1023 struct userdata
*u
= NULL
;
1027 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1028 snd_pcm_uframes_t period_frames
, tsched_frames
;
1030 snd_pcm_info_t
*pcm_info
= NULL
;
1033 char *name_buf
= NULL
;
1034 pa_bool_t namereg_fail
;
1035 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, mixer_reset
= TRUE
;
1037 pa_sink_new_data data
;
1039 snd_pcm_info_alloca(&pcm_info
);
1043 pa_alsa_redirect_errors_inc();
/* --- Argument parsing ------------------------------------------------ */
1045 if (!(ma
= pa_modargs_new(m
->argument
, valid_modargs
))) {
1046 pa_log("Failed to parse module arguments");
1050 ss
= m
->core
->default_sample_spec
;
1051 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1052 pa_log("Failed to parse sample specification and channel map");
1056 frame_size
= pa_frame_size(&ss
);
1058 nfrags
= m
->core
->default_n_fragments
;
1059 frag_size
= pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1061 frag_size
= frame_size
;
1062 tsched_size
= pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1063 tsched_watermark
= pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1065 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1066 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1067 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1068 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1069 pa_log("Failed to parse buffer metrics");
1073 hwbuf_size
= frag_size
* nfrags
;
1074 period_frames
= frag_size
/frame_size
;
1075 tsched_frames
= tsched_size
/frame_size
;
1077 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1078 pa_log("Failed to parse mmap argument.");
1082 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1083 pa_log("Failed to parse timer_scheduling argument.");
/* Timer-based scheduling needs hrtimers; fall back silently otherwise. */
1087 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1088 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1092 if (pa_modargs_get_value_boolean(ma
, "mixer_reset", &mixer_reset
) < 0) {
1093 pa_log("Failed to parse mixer_reset argument.");
/* --- Userdata and clock smoother setup ------------------------------- */
1097 u
= pa_xnew0(struct userdata
, 1);
1101 u
->use_mmap
= use_mmap
;
1102 u
->use_tsched
= use_tsched
;
1104 u
->after_rewind
= FALSE
;
1105 u
->rtpoll
= pa_rtpoll_new();
1106 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1107 u
->alsa_rtpoll_item
= NULL
;
1109 u
->smoother
= pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC
*2, DEFAULT_TSCHED_BUFFER_USEC
*2, TRUE
);
1110 usec
= pa_rtclock_usec();
1111 pa_smoother_set_time_offset(u
->smoother
, usec
);
1112 pa_smoother_pause(u
->smoother
, usec
);
1114 snd_config_update_free_global();
/* --- Device open: by card index (device_id) or by device string ------ */
1119 if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1121 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id(
1125 SND_PCM_STREAM_PLAYBACK
,
1126 &nfrags
, &period_frames
, tsched_frames
,
1133 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1134 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1137 SND_PCM_STREAM_PLAYBACK
,
1138 &nfrags
, &period_frames
, tsched_frames
,
1144 pa_assert(u
->device_name
);
1145 pa_log_info("Successfully opened device %s.", u
->device_name
);
/* b/d report whether mmap and tsched could actually be enabled. */
1147 if (use_mmap
&& !b
) {
1148 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1149 u
->use_mmap
= use_mmap
= FALSE
;
1152 if (use_tsched
&& (!b
|| !d
)) {
1153 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1154 u
->use_tsched
= use_tsched
= FALSE
;
1158 pa_log_info("Successfully enabled mmap() mode.");
1161 pa_log_info("Successfully enabled timer-based scheduling mode.");
1163 if ((err
= snd_pcm_info(u
->pcm_handle
, pcm_info
)) < 0) {
1164 pa_log("Error fetching PCM info: %s", snd_strerror(err
));
1168 /* ALSA might tweak the sample spec, so recalculate the frame size */
1169 frame_size
= pa_frame_size(&ss
);
/* --- Mixer probing (non-fatal on failure) ---------------------------- */
1171 if ((err
= snd_mixer_open(&u
->mixer_handle
, 0)) < 0)
1172 pa_log_warn("Error opening mixer: %s", snd_strerror(err
));
1174 pa_bool_t found
= FALSE
;
1176 if (pa_alsa_prepare_mixer(u
->mixer_handle
, u
->device_name
) >= 0)
1179 snd_pcm_info_t
*info
;
1181 snd_pcm_info_alloca(&info
);
/* Second attempt: derive a "hw:N" name from the card index. */
1183 if (snd_pcm_info(u
->pcm_handle
, info
) >= 0) {
1187 if ((card
= snd_pcm_info_get_card(info
)) >= 0) {
1189 md
= pa_sprintf_malloc("hw:%i", card
);
1191 if (strcmp(u
->device_name
, md
))
1192 if (pa_alsa_prepare_mixer(u
->mixer_handle
, md
) >= 0)
1200 if (!(u
->mixer_elem
= pa_alsa_find_elem(u
->mixer_handle
, "Master", "PCM")))
1204 snd_mixer_close(u
->mixer_handle
);
1205 u
->mixer_handle
= NULL
;
/* --- Sink creation ---------------------------------------------------- */
1209 if ((name
= pa_modargs_get_value(ma
, "sink_name", NULL
)))
1210 namereg_fail
= TRUE
;
1212 name
= name_buf
= pa_sprintf_malloc("alsa_output.%s", u
->device_name
);
1213 namereg_fail
= FALSE
;
1216 pa_sink_new_data_init(&data
);
1217 data
.driver
= __FILE__
;
1219 pa_sink_new_data_set_name(&data
, name
);
1220 data
.namereg_fail
= namereg_fail
;
1221 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1222 pa_sink_new_data_set_channel_map(&data
, &map
);
1224 pa_alsa_init_proplist(data
.proplist
, pcm_info
);
1225 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1226 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1227 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1228 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1230 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
);
1231 pa_sink_new_data_done(&data
);
1235 pa_log("Failed to create sink object");
1239 u
->sink
->parent
.process_msg
= sink_process_msg
;
1240 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1241 u
->sink
->userdata
= u
;
1243 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1244 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
/* --- Buffer geometry -------------------------------------------------- */
1246 u
->frame_size
= frame_size
;
1247 u
->fragment_size
= frag_size
= period_frames
* frame_size
;
1248 u
->nfragments
= nfrags
;
1249 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1250 u
->hwbuf_unused_frames
= 0;
1251 u
->tsched_watermark
= tsched_watermark
;
1253 u
->hw_dB_supported
= FALSE
;
1254 u
->hw_dB_min
= u
->hw_dB_max
= 0;
1255 u
->hw_volume_min
= u
->hw_volume_max
= 0;
1258 fix_tsched_watermark(u
);
1260 u
->sink
->thread_info
.max_rewind
= use_tsched
? u
->hwbuf_size
: 0;
1261 u
->sink
->max_latency
= pa_bytes_to_usec(u
->hwbuf_size
, &ss
);
1263 u
->sink
->min_latency
= u
->sink
->max_latency
;
1265 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1266 nfrags
, (long unsigned) u
->fragment_size
,
1267 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1270 pa_log_info("Time scheduling watermark is %0.2fms",
1271 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1273 if (update_sw_params(u
) < 0)
1276 pa_memchunk_reset(&u
->memchunk
);
/* --- Hardware volume/mute capability detection ----------------------- */
1278 if (u
->mixer_handle
) {
1279 pa_assert(u
->mixer_elem
);
1281 if (snd_mixer_selem_has_playback_volume(u
->mixer_elem
))
1283 if (pa_alsa_calc_mixer_map(u
->mixer_elem
, &map
, u
->mixer_map
, TRUE
) >= 0 &&
1284 snd_mixer_selem_get_playback_volume_range(u
->mixer_elem
, &u
->hw_volume_min
, &u
->hw_volume_max
) >= 0) {
1286 pa_bool_t suitable
= TRUE
;
1288 pa_log_info("Volume ranges from %li to %li.", u
->hw_volume_min
, u
->hw_volume_max
);
1290 if (u
->hw_volume_min
> u
->hw_volume_max
) {
1292 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u
->hw_volume_min
, u
->hw_volume_max
);
1295 } else if (u
->hw_volume_max
- u
->hw_volume_min
< 3) {
1297 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1300 } else if (snd_mixer_selem_get_playback_dB_range(u
->mixer_elem
, &u
->hw_dB_min
, &u
->hw_dB_max
) >= 0) {
1302 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u
->hw_dB_min
/100.0, u
->hw_dB_max
/100.0);
1304 /* Let's see if this thing actually is useful for muting */
1305 if (u
->hw_dB_min
> -6000) {
1306 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u
->hw_dB_min
) / 100);
1309 } else if (u
->hw_dB_max
< 0) {
1311 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u
->hw_dB_max
) / 100);
1314 } else if (u
->hw_dB_min
>= u
->hw_dB_max
) {
1316 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u
->hw_dB_min
) / 100, ((double) u
->hw_dB_max
) / 100);
1321 if (u
->hw_dB_max
> 0) {
1322 /* dB > 0 means overamplification, and clipping, we don't want that here */
1323 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u
->hw_dB_max
) / 100);
1327 u
->hw_dB_supported
= TRUE
;
1332 u
->sink
->get_volume
= sink_get_volume_cb
;
1333 u
->sink
->set_volume
= sink_set_volume_cb
;
1334 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->hw_dB_supported
? PA_SINK_DECIBEL_VOLUME
: 0);
1335 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->hw_dB_supported
? "supported" : "not supported");
1337 } else if (mixer_reset
) {
1338 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1339 pa_alsa_0dB_playback(u
->mixer_elem
);
1341 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1344 if (snd_mixer_selem_has_playback_switch(u
->mixer_elem
)) {
1345 u
->sink
->get_mute
= sink_get_mute_cb
;
1346 u
->sink
->set_mute
= sink_set_mute_cb
;
1347 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
/* Monitor the mixer for external changes via the main loop. */
1350 u
->mixer_fdl
= pa_alsa_fdlist_new();
1352 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, m
->core
->mainloop
) < 0) {
1353 pa_log("Failed to initialize file descriptor monitoring");
1357 snd_mixer_elem_set_callback(u
->mixer_elem
, mixer_callback
);
1358 snd_mixer_elem_set_callback_private(u
->mixer_elem
, u
);
1360 u
->mixer_fdl
= NULL
;
1362 pa_alsa_dump(u
->pcm_handle
);
/* --- Thread start and publication ------------------------------------ */
1364 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1365 pa_log("Failed to create thread.");
1369 /* Get initial mixer settings */
1370 if (data
.volume_is_set
) {
1371 if (u
->sink
->set_volume
)
1372 u
->sink
->set_volume(u
->sink
);
1374 if (u
->sink
->get_volume
)
1375 u
->sink
->get_volume(u
->sink
);
1378 if (data
.muted_is_set
) {
1379 if (u
->sink
->set_mute
)
1380 u
->sink
->set_mute(u
->sink
);
1382 if (u
->sink
->get_mute
)
1383 u
->sink
->get_mute(u
->sink
);
1386 pa_sink_put(u
->sink
);
1388 pa_modargs_free(ma
);
1395 pa_modargs_free(ma
);
1402 void pa__done(pa_module
*m
) {
1407 if (!(u
= m
->userdata
)) {
1408 pa_alsa_redirect_errors_dec();
1413 pa_sink_unlink(u
->sink
);
1416 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1417 pa_thread_free(u
->thread
);
1420 pa_thread_mq_done(&u
->thread_mq
);
1423 pa_sink_unref(u
->sink
);
1425 if (u
->memchunk
.memblock
)
1426 pa_memblock_unref(u
->memchunk
.memblock
);
1428 if (u
->alsa_rtpoll_item
)
1429 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1432 pa_rtpoll_free(u
->rtpoll
);
1435 pa_alsa_fdlist_free(u
->mixer_fdl
);
1437 if (u
->mixer_handle
)
1438 snd_mixer_close(u
->mixer_handle
);
1440 if (u
->pcm_handle
) {
1441 snd_pcm_drop(u
->pcm_handle
);
1442 snd_pcm_close(u
->pcm_handle
);
1446 pa_smoother_free(u
->smoother
);
1448 pa_xfree(u
->device_name
);
1451 snd_config_update_free_global();
1453 pa_alsa_redirect_errors_dec();