2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
56 #include <modules/reserve-wrap.h>
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
61 /* #define DEBUG_TIMING */
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
76 pa_thread_mq thread_mq
;
79 snd_pcm_t
*pcm_handle
;
81 pa_alsa_fdlist
*mixer_fdl
;
82 snd_mixer_t
*mixer_handle
;
83 snd_mixer_elem_t
*mixer_elem
;
84 long hw_volume_max
, hw_volume_min
;
85 long hw_dB_max
, hw_dB_min
;
86 pa_bool_t hw_dB_supported
:1;
87 pa_bool_t mixer_seperate_channels
:1;
88 pa_cvolume hardware_volume
;
101 pa_memchunk memchunk
;
105 pa_bool_t use_mmap
:1, use_tsched
:1;
107 pa_bool_t first
, after_rewind
;
109 pa_rtpoll_item
*alsa_rtpoll_item
;
111 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
113 pa_smoother
*smoother
;
114 uint64_t write_count
;
115 uint64_t since_start
;
117 pa_reserve_wrapper
*reserve
;
118 pa_hook_slot
*reserve_slot
;
121 static void userdata_free(struct userdata
*u
);
123 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
127 if (pa_sink_suspend(u
->sink
, TRUE
) < 0)
128 return PA_HOOK_CANCEL
;
133 static void reserve_done(struct userdata
*u
) {
136 if (u
->reserve_slot
) {
137 pa_hook_slot_free(u
->reserve_slot
);
138 u
->reserve_slot
= NULL
;
142 pa_reserve_wrapper_unref(u
->reserve
);
147 static void reserve_update(struct userdata
*u
) {
148 const char *description
;
151 if (!u
->sink
|| !u
->reserve
)
154 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
155 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
158 static int reserve_init(struct userdata
*u
, const char *dname
) {
167 if (pa_in_system_mode())
170 /* We are resuming, try to lock the device */
171 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
174 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
182 pa_assert(!u
->reserve_slot
);
183 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
188 static void fix_min_sleep_wakeup(struct userdata
*u
) {
189 size_t max_use
, max_use_2
;
193 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
194 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
196 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
197 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
199 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
200 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
203 static void fix_tsched_watermark(struct userdata
*u
) {
207 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
209 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
210 u
->tsched_watermark
= max_use
- u
->min_sleep
;
212 if (u
->tsched_watermark
< u
->min_wakeup
)
213 u
->tsched_watermark
= u
->min_wakeup
;
216 static void adjust_after_underrun(struct userdata
*u
) {
217 size_t old_watermark
;
218 pa_usec_t old_min_latency
, new_min_latency
;
221 pa_assert(u
->use_tsched
);
223 /* First, just try to increase the watermark */
224 old_watermark
= u
->tsched_watermark
;
225 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_step
);
226 fix_tsched_watermark(u
);
228 if (old_watermark
!= u
->tsched_watermark
) {
229 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
230 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
234 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
235 old_min_latency
= u
->sink
->thread_info
.min_latency
;
236 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_STEP_USEC
);
237 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
239 if (old_min_latency
!= new_min_latency
) {
240 pa_log_notice("Increasing minimal latency to %0.2f ms",
241 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
243 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
247 /* When we reach this we're officialy fucked! */
250 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
253 pa_assert(sleep_usec
);
254 pa_assert(process_usec
);
258 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
260 if (usec
== (pa_usec_t
) -1)
261 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
263 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
268 *sleep_usec
= usec
- wm
;
272 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
273 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
274 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
275 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
279 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
284 pa_log_debug("%s: %s", call
, snd_strerror(err
));
286 pa_assert(err
!= -EAGAIN
);
289 pa_log_debug("%s: Buffer underrun!", call
);
291 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
292 pa_log("%s: %s", call
, snd_strerror(err
));
301 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
) {
304 /* We use <= instead of < for this check here because an underrun
305 * only happens after the last sample was processed, not already when
306 * it is removed from the buffer. This is particularly important
307 * when block transfer is used. */
309 if (n_bytes
<= u
->hwbuf_size
) {
310 left_to_play
= u
->hwbuf_size
- n_bytes
;
313 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
323 if (!u
->first
&& !u
->after_rewind
) {
325 if (pa_log_ratelimit())
326 pa_log_info("Underrun!");
329 adjust_after_underrun(u
);
336 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
337 pa_bool_t work_done
= TRUE
;
338 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
343 pa_sink_assert_ref(u
->sink
);
346 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
353 /* First we determine how many samples are missing to fill the
354 * buffer up to 100% */
356 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
358 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
364 n_bytes
= (size_t) n
* u
->frame_size
;
367 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
370 left_to_play
= check_left_to_play(u
, n_bytes
);
374 /* We won't fill up the playback buffer before at least
375 * half the sleep time is over because otherwise we might
376 * ask for more data from the clients then they expect. We
377 * need to guarantee that clients only have to keep around
378 * a single hw buffer length. */
381 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
383 pa_log_debug("Not filling up, because too early.");
388 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
392 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
393 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
394 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
395 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
401 pa_log_debug("Not filling up, because not necessary.");
409 pa_log_debug("Not filling up, because already too many iterations.");
415 n_bytes
-= u
->hwbuf_unused
;
419 pa_log_debug("Filling up");
426 const snd_pcm_channel_area_t
*areas
;
427 snd_pcm_uframes_t offset
, frames
;
428 snd_pcm_sframes_t sframes
;
430 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
431 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
433 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
435 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
441 /* Make sure that if these memblocks need to be copied they will fit into one slot */
442 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
443 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
445 /* Check these are multiples of 8 bit */
446 pa_assert((areas
[0].first
& 7) == 0);
447 pa_assert((areas
[0].step
& 7)== 0);
449 /* We assume a single interleaved memory buffer */
450 pa_assert((areas
[0].first
>> 3) == 0);
451 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
453 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
455 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
456 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
459 pa_sink_render_into_full(u
->sink
, &chunk
);
460 pa_memblock_unref_fixed(chunk
.memblock
);
462 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
464 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
472 u
->write_count
+= frames
* u
->frame_size
;
473 u
->since_start
+= frames
* u
->frame_size
;
476 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames
* u
->frame_size
));
479 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
482 n_bytes
-= (size_t) frames
* u
->frame_size
;
486 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
487 return work_done
? 1 : 0;
490 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
491 pa_bool_t work_done
= FALSE
;
492 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
497 pa_sink_assert_ref(u
->sink
);
500 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
507 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
509 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
515 n_bytes
= (size_t) n
* u
->frame_size
;
516 left_to_play
= check_left_to_play(u
, n_bytes
);
520 /* We won't fill up the playback buffer before at least
521 * half the sleep time is over because otherwise we might
522 * ask for more data from the clients then they expect. We
523 * need to guarantee that clients only have to keep around
524 * a single hw buffer length. */
527 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
530 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
534 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
535 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
536 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
537 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
547 pa_log_debug("Not filling up, because already too many iterations.");
553 n_bytes
-= u
->hwbuf_unused
;
557 snd_pcm_sframes_t frames
;
560 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
562 if (u
->memchunk
.length
<= 0)
563 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
565 pa_assert(u
->memchunk
.length
> 0);
567 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
569 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
570 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
572 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
573 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
574 pa_memblock_release(u
->memchunk
.memblock
);
576 pa_assert(frames
!= 0);
578 if (PA_UNLIKELY(frames
< 0)) {
580 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
586 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
587 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
589 if (u
->memchunk
.length
<= 0) {
590 pa_memblock_unref(u
->memchunk
.memblock
);
591 pa_memchunk_reset(&u
->memchunk
);
596 u
->write_count
+= frames
* u
->frame_size
;
597 u
->since_start
+= frames
* u
->frame_size
;
599 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
601 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
604 n_bytes
-= (size_t) frames
* u
->frame_size
;
608 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) - process_usec
;
609 return work_done
? 1 : 0;
612 static void update_smoother(struct userdata
*u
) {
613 snd_pcm_sframes_t delay
= 0;
616 pa_usec_t now1
= 0, now2
;
617 snd_pcm_status_t
*status
;
619 snd_pcm_status_alloca(&status
);
622 pa_assert(u
->pcm_handle
);
624 /* Let's update the time smoother */
626 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
627 pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err
));
631 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
632 pa_log_warn("Failed to get timestamp: %s", snd_strerror(err
));
634 snd_htimestamp_t htstamp
= { 0, 0 };
635 snd_pcm_status_get_htstamp(status
, &htstamp
);
636 now1
= pa_timespec_load(&htstamp
);
639 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
641 if (PA_UNLIKELY(position
< 0))
644 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
646 now1
= pa_rtclock_usec();
648 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
650 pa_smoother_put(u
->smoother
, now1
, now2
);
653 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
656 pa_usec_t now1
, now2
;
660 now1
= pa_rtclock_usec();
661 now2
= pa_smoother_get(u
->smoother
, now1
);
663 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
665 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
667 if (u
->memchunk
.memblock
)
668 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
673 static int build_pollfd(struct userdata
*u
) {
675 pa_assert(u
->pcm_handle
);
677 if (u
->alsa_rtpoll_item
)
678 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
680 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
686 /* Called from IO context */
687 static int suspend(struct userdata
*u
) {
689 pa_assert(u
->pcm_handle
);
691 pa_smoother_pause(u
->smoother
, pa_rtclock_usec());
693 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
694 * take awfully long with our long buffer sizes today. */
695 snd_pcm_close(u
->pcm_handle
);
696 u
->pcm_handle
= NULL
;
698 if (u
->alsa_rtpoll_item
) {
699 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
700 u
->alsa_rtpoll_item
= NULL
;
703 pa_log_info("Device suspended...");
708 /* Called from IO context */
709 static int update_sw_params(struct userdata
*u
) {
710 snd_pcm_uframes_t avail_min
;
715 /* Use the full buffer if noone asked us for anything specific */
721 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
724 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
726 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
728 /* We need at least one sample in our buffer */
730 if (PA_UNLIKELY(b
< u
->frame_size
))
733 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
736 fix_min_sleep_wakeup(u
);
737 fix_tsched_watermark(u
);
740 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
742 /* We need at last one frame in the used part of the buffer */
743 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
746 pa_usec_t sleep_usec
, process_usec
;
748 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
749 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
752 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
754 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
755 pa_log("Failed to set software parameters: %s", snd_strerror(err
));
759 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
764 /* Called from IO context */
765 static int unsuspend(struct userdata
*u
) {
770 snd_pcm_uframes_t period_size
;
773 pa_assert(!u
->pcm_handle
);
775 pa_log_info("Trying resume...");
777 snd_config_update_free_global();
778 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
779 /*SND_PCM_NONBLOCK|*/
780 SND_PCM_NO_AUTO_RESAMPLE
|
781 SND_PCM_NO_AUTO_CHANNELS
|
782 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
783 pa_log("Error opening PCM device %s: %s", u
->device_name
, snd_strerror(err
));
787 ss
= u
->sink
->sample_spec
;
788 nfrags
= u
->nfragments
;
789 period_size
= u
->fragment_size
/ u
->frame_size
;
793 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
794 pa_log("Failed to set hardware parameters: %s", snd_strerror(err
));
798 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
799 pa_log_warn("Resume failed, couldn't get original access mode.");
803 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
804 pa_log_warn("Resume failed, couldn't restore original sample settings.");
808 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
809 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
810 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
811 (unsigned long) nfrags
, period_size
* u
->frame_size
);
815 if (update_sw_params(u
) < 0)
818 if (build_pollfd(u
) < 0)
824 pa_log_info("Resumed successfully...");
830 snd_pcm_close(u
->pcm_handle
);
831 u
->pcm_handle
= NULL
;
837 /* Called from IO context */
838 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
839 struct userdata
*u
= PA_SINK(o
)->userdata
;
843 case PA_SINK_MESSAGE_GET_LATENCY
: {
847 r
= sink_get_latency(u
);
849 *((pa_usec_t
*) data
) = r
;
854 case PA_SINK_MESSAGE_SET_STATE
:
856 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
858 case PA_SINK_SUSPENDED
:
859 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
867 case PA_SINK_RUNNING
:
869 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
870 if (build_pollfd(u
) < 0)
874 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
875 if (unsuspend(u
) < 0)
881 case PA_SINK_UNLINKED
:
883 case PA_SINK_INVALID_STATE
:
890 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
893 /* Called from main context */
894 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
895 pa_sink_state_t old_state
;
898 pa_sink_assert_ref(s
);
899 pa_assert_se(u
= s
->userdata
);
901 old_state
= pa_sink_get_state(u
->sink
);
903 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
905 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
906 if (reserve_init(u
, u
->device_name
) < 0)
912 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
913 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
916 pa_assert(u
->mixer_handle
);
918 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
921 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
922 pa_sink_get_volume(u
->sink
, TRUE
);
923 pa_sink_get_mute(u
->sink
, TRUE
);
929 static pa_volume_t
from_alsa_volume(struct userdata
*u
, long alsa_vol
) {
931 return (pa_volume_t
) round(((double) (alsa_vol
- u
->hw_volume_min
) * PA_VOLUME_NORM
) /
932 (double) (u
->hw_volume_max
- u
->hw_volume_min
));
935 static long to_alsa_volume(struct userdata
*u
, pa_volume_t vol
) {
938 alsa_vol
= (long) round(((double) vol
* (double) (u
->hw_volume_max
- u
->hw_volume_min
))
939 / PA_VOLUME_NORM
) + u
->hw_volume_min
;
941 return PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_volume_min
, u
->hw_volume_max
);
944 static void sink_get_volume_cb(pa_sink
*s
) {
945 struct userdata
*u
= s
->userdata
;
949 char t
[PA_CVOLUME_SNPRINT_MAX
];
952 pa_assert(u
->mixer_elem
);
954 if (u
->mixer_seperate_channels
) {
956 r
.channels
= s
->sample_spec
.channels
;
958 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
961 if (u
->hw_dB_supported
) {
963 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
966 #ifdef HAVE_VALGRIND_MEMCHECK_H
967 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
970 r
.values
[i
] = pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0);
973 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
976 r
.values
[i
] = from_alsa_volume(u
, alsa_vol
);
983 if (u
->hw_dB_supported
) {
985 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
988 #ifdef HAVE_VALGRIND_MEMCHECK_H
989 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
992 pa_cvolume_set(&r
, s
->sample_spec
.channels
, pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0));
996 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
999 pa_cvolume_set(&r
, s
->sample_spec
.channels
, from_alsa_volume(u
, alsa_vol
));
1003 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1005 if (!pa_cvolume_equal(&u
->hardware_volume
, &r
)) {
1007 s
->virtual_volume
= u
->hardware_volume
= r
;
1009 if (u
->hw_dB_supported
) {
1012 /* Hmm, so the hardware volume changed, let's reset our software volume */
1013 pa_cvolume_reset(&reset
, s
->sample_spec
.channels
);
1014 pa_sink_set_soft_volume(s
, &reset
);
1021 pa_log_error("Unable to read volume: %s", snd_strerror(err
));
1024 static void sink_set_volume_cb(pa_sink
*s
) {
1025 struct userdata
*u
= s
->userdata
;
1031 pa_assert(u
->mixer_elem
);
1033 if (u
->mixer_seperate_channels
) {
1035 r
.channels
= s
->sample_spec
.channels
;
1037 for (i
= 0; i
< s
->sample_spec
.channels
; i
++) {
1041 vol
= s
->virtual_volume
.values
[i
];
1043 if (u
->hw_dB_supported
) {
1045 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
1046 alsa_vol
+= u
->hw_dB_max
;
1047 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
1049 if ((err
= snd_mixer_selem_set_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
, 1)) < 0)
1052 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
1055 #ifdef HAVE_VALGRIND_MEMCHECK_H
1056 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
1059 r
.values
[i
] = pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0);
1062 alsa_vol
= to_alsa_volume(u
, vol
);
1064 if ((err
= snd_mixer_selem_set_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], alsa_vol
)) < 0)
1067 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, u
->mixer_map
[i
], &alsa_vol
)) < 0)
1070 r
.values
[i
] = from_alsa_volume(u
, alsa_vol
);
1078 vol
= pa_cvolume_max(&s
->virtual_volume
);
1080 if (u
->hw_dB_supported
) {
1081 alsa_vol
= (long) (pa_sw_volume_to_dB(vol
) * 100);
1082 alsa_vol
+= u
->hw_dB_max
;
1083 alsa_vol
= PA_CLAMP_UNLIKELY(alsa_vol
, u
->hw_dB_min
, u
->hw_dB_max
);
1085 if ((err
= snd_mixer_selem_set_playback_dB_all(u
->mixer_elem
, alsa_vol
, 1)) < 0)
1088 if ((err
= snd_mixer_selem_get_playback_dB(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
1091 #ifdef HAVE_VALGRIND_MEMCHECK_H
1092 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol
, sizeof(alsa_vol
));
1095 pa_cvolume_set(&r
, s
->sample_spec
.channels
, pa_sw_volume_from_dB((double) (alsa_vol
- u
->hw_dB_max
) / 100.0));
1098 alsa_vol
= to_alsa_volume(u
, vol
);
1100 if ((err
= snd_mixer_selem_set_playback_volume_all(u
->mixer_elem
, alsa_vol
)) < 0)
1103 if ((err
= snd_mixer_selem_get_playback_volume(u
->mixer_elem
, SND_MIXER_SCHN_MONO
, &alsa_vol
)) < 0)
1106 pa_cvolume_set(&r
, s
->sample_spec
.channels
, from_alsa_volume(u
, alsa_vol
));
1110 u
->hardware_volume
= r
;
1112 if (u
->hw_dB_supported
) {
1113 char t
[PA_CVOLUME_SNPRINT_MAX
];
1115 /* Match exactly what the user requested by software */
1116 pa_sw_cvolume_divide(&s
->soft_volume
, &s
->virtual_volume
, &u
->hardware_volume
);
1118 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->virtual_volume
));
1119 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1120 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->soft_volume
));
1124 /* We can't match exactly what the user requested, hence let's
1125 * at least tell the user about it */
1127 s
->virtual_volume
= r
;
1132 pa_log_error("Unable to set volume: %s", snd_strerror(err
));
1135 static void sink_get_mute_cb(pa_sink
*s
) {
1136 struct userdata
*u
= s
->userdata
;
1140 pa_assert(u
->mixer_elem
);
1142 if ((err
= snd_mixer_selem_get_playback_switch(u
->mixer_elem
, 0, &sw
)) < 0) {
1143 pa_log_error("Unable to get switch: %s", snd_strerror(err
));
1150 static void sink_set_mute_cb(pa_sink
*s
) {
1151 struct userdata
*u
= s
->userdata
;
1155 pa_assert(u
->mixer_elem
);
1157 if ((err
= snd_mixer_selem_set_playback_switch_all(u
->mixer_elem
, !s
->muted
)) < 0) {
1158 pa_log_error("Unable to set switch: %s", snd_strerror(err
));
1163 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1164 struct userdata
*u
= s
->userdata
;
1171 before
= u
->hwbuf_unused
;
1172 update_sw_params(u
);
1174 /* Let's check whether we now use only a smaller part of the
1175 buffer then before. If so, we need to make sure that subsequent
1176 rewinds are relative to the new maximum fill level and not to the
1177 current fill level. Thus, let's do a full rewind once, to clear
1180 if (u
->hwbuf_unused
> before
) {
1181 pa_log_debug("Requesting rewind due to latency change.");
1182 pa_sink_request_rewind(s
, (size_t) -1);
1186 static int process_rewind(struct userdata
*u
) {
1187 snd_pcm_sframes_t unused
;
1188 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1191 /* Figure out how much we shall rewind and reset the counter */
1192 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1194 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1196 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1197 pa_log("snd_pcm_avail() failed: %s", snd_strerror((int) unused
));
1201 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1203 if (u
->hwbuf_size
> unused_nbytes
)
1204 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1208 if (rewind_nbytes
> limit_nbytes
)
1209 rewind_nbytes
= limit_nbytes
;
1211 if (rewind_nbytes
> 0) {
1212 snd_pcm_sframes_t in_frames
, out_frames
;
1214 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1216 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1217 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1218 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1219 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames
));
1222 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1224 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1226 if (rewind_nbytes
<= 0)
1227 pa_log_info("Tried rewind, but was apparently not possible.");
1229 u
->write_count
-= out_frames
* u
->frame_size
;
1230 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1231 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1233 u
->after_rewind
= TRUE
;
1237 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1239 pa_sink_process_rewind(u
->sink
, 0);
1243 static void thread_func(void *userdata
) {
1244 struct userdata
*u
= userdata
;
1245 unsigned short revents
= 0;
1249 pa_log_debug("Thread starting up");
1251 if (u
->core
->realtime_scheduling
)
1252 pa_make_realtime(u
->core
->realtime_priority
);
1254 pa_thread_mq_install(&u
->thread_mq
);
1255 pa_rtpoll_install(u
->rtpoll
);
1261 pa_log_debug("Loop");
1264 /* Render some data and write it to the dsp */
1265 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1267 pa_usec_t sleep_usec
= 0;
1269 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1270 if (process_rewind(u
) < 0)
1274 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
);
1276 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
);
1281 /* pa_log_debug("work_done = %i", work_done); */
1286 pa_log_info("Starting playback.");
1287 snd_pcm_start(u
->pcm_handle
);
1289 pa_smoother_resume(u
->smoother
, pa_rtclock_usec());
1295 if (u
->use_tsched
) {
1298 if (u
->since_start
<= u
->hwbuf_size
) {
1300 /* USB devices on ALSA seem to hit a buffer
1301 * underrun during the first iterations much
1302 * quicker then we calculate here, probably due to
1303 * the transport latency. To accommodate for that
1304 * we artificially decrease the sleep time until
1305 * we have filled the buffer at least once
1308 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1312 /* OK, the playback buffer is now full, let's
1313 * calculate when to wake up next */
1314 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1316 /* Convert from the sound card time domain to the
1317 * system time domain */
1318 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_usec(), sleep_usec
);
1320 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1322 /* We don't trust the conversion, so we wake up whatever comes first */
1323 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1327 u
->after_rewind
= FALSE
;
1329 } else if (u
->use_tsched
)
1331 /* OK, we're in an invalid state, let's disable our timers */
1332 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1334 /* Hmm, nothing to do. Let's sleep */
1335 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1341 /* Tell ALSA about this and process its response */
1342 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1343 struct pollfd
*pollfd
;
1347 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1349 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1350 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err
));
1354 if (revents
& ~POLLOUT
) {
1355 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1360 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1361 pa_log_debug("Wakeup from ALSA!");
1368 /* If this was no regular exit from the loop we have to continue
1369 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1370 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1371 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1374 pa_log_debug("Thread shutting down");
1377 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
) {
1383 pa_assert(device_name
);
1385 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1386 pa_sink_new_data_set_name(data
, n
);
1387 data
->namereg_fail
= TRUE
;
1391 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1392 data
->namereg_fail
= TRUE
;
1394 n
= device_id
? device_id
: device_name
;
1395 data
->namereg_fail
= FALSE
;
1398 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1399 pa_sink_new_data_set_name(data
, t
);
/*
 * setup_mixer() — wire the ALSA mixer element to this sink's volume and
 * mute controls, falling back to software volume/mute where hardware
 * support is missing or unusable.
 *
 * NOTE(review): this extract is incomplete — the embedded original line
 * numbering jumps in several places (e.g. 1406->1409, 1419->1424,
 * 1449->1455), so early returns, else-branches, braces and the final
 * return statement appear to have been dropped.  Do not treat this block
 * as compilable as-is; restore it from upstream before editing logic.
 *
 * u         : module userdata; u->mixer_handle may be NULL (no mixer found)
 * ignore_dB : when TRUE, skip the dB range query and use raw volume steps
 * returns   : presumably 0 on success, negative on failure — confirm
 *             against upstream.
 */
1403 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
/* No mixer handle — nothing to set up (software volume only). */
1406 if (!u
->mixer_handle
)
1409 pa_assert(u
->mixer_elem
);
/* Hardware volume path: probe the raw volume range first. */
1411 if (snd_mixer_selem_has_playback_volume(u
->mixer_elem
)) {
1412 pa_bool_t suitable
= FALSE
;
1414 if (snd_mixer_selem_get_playback_volume_range(u
->mixer_elem
, &u
->hw_volume_min
, &u
->hw_volume_max
) < 0)
1415 pa_log_info("Failed to get volume range. Falling back to software volume control.");
/* A min >= max range is nonsensical; the driver is broken. */
1416 else if (u
->hw_volume_min
>= u
->hw_volume_max
)
1417 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u
->hw_volume_min
, u
->hw_volume_max
);
1419 pa_log_info("Volume ranges from %li to %li.", u
->hw_volume_min
, u
->hw_volume_max
);
/* Optionally query the dB range; ignore_dB forces software-style
 * linear steps even if the mixer reports dB data. */
1424 if (ignore_dB
|| snd_mixer_selem_get_playback_dB_range(u
->mixer_elem
, &u
->hw_dB_min
, &u
->hw_dB_max
) < 0)
1425 pa_log_info("Mixer doesn't support dB information or data is ignored.");
/* alsa-lib fills these via an ioctl valgrind can't see through;
 * mark them defined to silence false positives. */
1427 #ifdef HAVE_VALGRIND_MEMCHECK_H
1428 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_min
, sizeof(u
->hw_dB_min
));
1429 VALGRIND_MAKE_MEM_DEFINED(&u
->hw_dB_max
, sizeof(u
->hw_dB_max
));
1432 if (u
->hw_dB_min
>= u
->hw_dB_max
)
/* dB values are in 1/100 dB units, hence the /100.0 for display. */
1433 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u
->hw_dB_min
/100.0, (double) u
->hw_dB_max
/100.0);
1435 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u
->hw_dB_min
/100.0, (double) u
->hw_dB_max
/100.0);
1436 u
->hw_dB_supported
= TRUE
;
/* If the device can amplify (> 0 dB), publish a base volume so
 * clients know where "100%" sits relative to the attenuated range. */
1438 if (u
->hw_dB_max
> 0) {
1439 u
->sink
->base_volume
= pa_sw_volume_from_dB(- (double) u
->hw_dB_max
/100.0);
1440 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1442 pa_log_info("No particular base volume set, fixing to 0 dB");
/* Fewer than 4 raw steps and no dB data: too coarse for useful
 * hardware control, so fall back to software volume. */
1446 if (!u
->hw_dB_supported
&&
1447 u
->hw_volume_max
- u
->hw_volume_min
< 3) {
1449 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
/* Map mixer channels to the sink's channel map; on success we can
 * control channels individually (note: "seperate" typo is upstream's). */
1455 u
->mixer_seperate_channels
= pa_alsa_calc_mixer_map(u
->mixer_elem
, &u
->sink
->channel_map
, u
->mixer_map
, TRUE
) >= 0;
/* Install the hardware volume callbacks and advertise the capability. */
1457 u
->sink
->get_volume
= sink_get_volume_cb
;
1458 u
->sink
->set_volume
= sink_set_volume_cb
;
1459 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->hw_dB_supported
? PA_SINK_DECIBEL_VOLUME
: 0);
1460 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->hw_dB_supported
? "supported" : "not supported");
/* Without dB data, expose the raw step count to clients. */
1462 if (!u
->hw_dB_supported
)
1463 u
->sink
->n_volume_steps
= u
->hw_volume_max
- u
->hw_volume_min
+ 1;
1465 pa_log_info("Using software volume control.");
/* Hardware mute, if the element has a playback switch. */
1468 if (snd_mixer_selem_has_playback_switch(u
->mixer_elem
)) {
1469 u
->sink
->get_mute
= sink_get_mute_cb
;
1470 u
->sink
->set_mute
= sink_set_mute_cb
;
1471 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1473 pa_log_info("Using software mute control.");
/* Watch the mixer fds on the main loop so external volume changes
 * (e.g. alsamixer) are picked up asynchronously. */
1475 u
->mixer_fdl
= pa_alsa_fdlist_new();
1477 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1478 pa_log("Failed to initialize file descriptor monitoring");
1482 snd_mixer_elem_set_callback(u
->mixer_elem
, mixer_callback
);
1483 snd_mixer_elem_set_callback_private(u
->mixer_elem
, u
);
/*
 * pa_alsa_sink_new() — create and start an ALSA playback sink.
 *
 * Parses module arguments, opens the PCM device (by profile, by device id,
 * or by device string), creates the pa_sink object, configures buffer
 * metrics and (optionally) timer-based scheduling, sets up the mixer,
 * starts the I/O thread and publishes the sink.
 *
 * NOTE(review): this extract is incomplete — the embedded original line
 * numbering has large gaps (e.g. 1507->1512, 1567->1577, 1587->1592,
 * 1671->1674, 1747->1758), so the "goto fail" error paths, the fail:
 * cleanup label, several declarations (map, frame_size, usec, err) and the
 * final "return u->sink;" appear to have been dropped.  Restore from
 * upstream before editing logic.
 *
 * m       : owning module
 * ma      : parsed module arguments
 * driver  : driver name recorded in the sink data
 * card    : owning card, may be NULL — TODO confirm (unused in the
 *           visible fragment)
 * profile : ALSA profile to open, may be NULL (then device_id/device
 *           arguments are used)
 * returns : presumably the new pa_sink on success, NULL on failure.
 */
1488 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, const pa_alsa_profile_info
*profile
) {
1490 struct userdata
*u
= NULL
;
1491 const char *dev_id
= NULL
;
1492 pa_sample_spec ss
, requested_ss
;
1494 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1495 snd_pcm_uframes_t period_frames
, tsched_frames
;
1497 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1499 pa_sink_new_data data
;
/* Start from the server defaults, then let module arguments override. */
1504 ss
= m
->core
->default_sample_spec
;
1505 map
= m
->core
->default_channel_map
;
1506 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1507 pa_log("Failed to parse sample specification and channel map");
1512 frame_size
= pa_frame_size(&ss
);
/* Default buffer metrics derived from core settings and the
 * tsched defaults defined at the top of this file. */
1514 nfrags
= m
->core
->default_n_fragments
;
1515 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
/* NOTE(review): numbering jumps 1515->1517 — the guard around this
 * fallback (presumably "if (frag_size <= 0)") was dropped. */
1517 frag_size
= (uint32_t) frame_size
;
1518 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1519 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
/* Explicit buffer-metric overrides from the module arguments. */
1521 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1522 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1523 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1524 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1525 pa_log("Failed to parse buffer metrics");
/* Derived byte/frame quantities used when opening the device. */
1529 hwbuf_size
= frag_size
* nfrags
;
1530 period_frames
= frag_size
/frame_size
;
1531 tsched_frames
= tsched_size
/frame_size
;
1533 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1534 pa_log("Failed to parse mmap argument.");
1538 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1539 pa_log("Failed to parse tsched argument.");
1543 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1544 pa_log("Failed to parse ignore_dB argument.");
/* Timer-based scheduling needs hrtimers; otherwise fall back to
 * classic interrupt-driven scheduling. */
1548 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1549 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1553 u
= pa_xnew0(struct userdata
, 1);
1556 u
->use_mmap
= use_mmap
;
1557 u
->use_tsched
= use_tsched
;
/* Thread communication and poll infrastructure for the I/O thread. */
1559 u
->rtpoll
= pa_rtpoll_new();
1560 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
/* Smoother translating between sound-card and system time domains;
 * created paused until playback actually starts. */
1562 u
->smoother
= pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC
*2, DEFAULT_TSCHED_BUFFER_USEC
*2, TRUE
, 5);
1563 usec
= pa_rtclock_usec();
1564 pa_smoother_set_time_offset(u
->smoother
, usec
);
1565 pa_smoother_pause(u
->smoother
, usec
);
/* Device reservation (NOTE(review): this call is visibly garbled in
 * the extract — nested pa_modargs_get_value with unbalanced parens). */
1567 if (reserve_init(u
, pa_modargs_get_value(
1569 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
))) < 0)
/* Open the PCM: by profile (needs device_id), by device id with
 * automatic profile probing, or by plain device string. */
1577 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1578 pa_log("device_id= not set");
1582 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_profile(
1586 SND_PCM_STREAM_PLAYBACK
,
1587 &nfrags
, &period_frames
, tsched_frames
,
1592 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1594 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1598 SND_PCM_STREAM_PLAYBACK
,
1599 &nfrags
, &period_frames
, tsched_frames
,
1606 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1607 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1610 SND_PCM_STREAM_PLAYBACK
,
1611 &nfrags
, &period_frames
, tsched_frames
,
1617 pa_assert(u
->device_name
);
1618 pa_log_info("Successfully opened device %s.", u
->device_name
);
1621 pa_log_info("Selected configuration '%s' (%s).", profile
->description
, profile
->name
);
/* The open helpers report via b/d whether mmap and tsched were
 * actually achievable; downgrade our mode flags accordingly. */
1623 if (use_mmap
&& !b
) {
1624 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1625 u
->use_mmap
= use_mmap
= FALSE
;
1628 if (use_tsched
&& (!b
|| !d
)) {
1629 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1630 u
->use_tsched
= use_tsched
= FALSE
;
/* tsched only makes sense on real hardware (not e.g. dmix/plug). */
1633 if (use_tsched
&& !pa_alsa_pcm_is_hw(u
->pcm_handle
)) {
1634 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1635 u
->use_tsched
= use_tsched
= FALSE
;
1639 pa_log_info("Successfully enabled mmap() mode.");
1642 pa_log_info("Successfully enabled timer-based scheduling mode.");
1644 /* ALSA might tweak the sample spec, so recalculate the frame size */
1645 frame_size
= pa_frame_size(&ss
);
1647 pa_alsa_find_mixer_and_elem(u
->pcm_handle
, &u
->mixer_handle
, &u
->mixer_elem
, pa_modargs_get_value(ma
, "control", NULL
));
/* Build the sink construction data: name, specs and property list. */
1649 pa_sink_new_data_init(&data
);
1650 data
.driver
= driver
;
1653 set_sink_name(&data
, ma
, dev_id
, u
->device_name
);
1654 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1655 pa_sink_new_data_set_channel_map(&data
, &map
);
1657 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1658 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1659 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1660 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1661 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1664 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, profile
->name
);
1665 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, profile
->description
);
1668 pa_alsa_init_description(data
.proplist
);
/* Create the sink; DYNAMIC_LATENCY is only offered in tsched mode. */
1670 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1671 pa_sink_new_data_done(&data
);
1674 pa_log("Failed to create sink object");
/* Hook up sink callbacks and hand the sink our userdata. */
1678 u
->sink
->parent
.process_msg
= sink_process_msg
;
1679 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1680 u
->sink
->set_state
= sink_set_state_cb
;
1681 u
->sink
->userdata
= u
;
1683 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1684 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
/* Record the final (possibly ALSA-adjusted) buffer metrics. */
1686 u
->frame_size
= frame_size
;
1687 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1688 u
->nfragments
= nfrags
;
1689 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
/* Watermark was computed against the requested spec; rescale it to
 * the sample spec the sink actually got. */
1690 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1691 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1693 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1694 nfrags
, (long unsigned) u
->fragment_size
,
1695 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1697 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
/* Extra configuration that only applies to timer-based scheduling. */
1699 if (u
->use_tsched
) {
1700 fix_min_sleep_wakeup(u
);
1701 fix_tsched_watermark(u
);
1703 u
->watermark_step
= pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC
, &u
->sink
->sample_spec
);
1705 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
1707 pa_sink_set_latency_range(u
->sink
,
1709 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1711 pa_log_info("Time scheduling watermark is %0.2fms",
1712 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1717 if (update_sw_params(u
) < 0)
1720 if (setup_mixer(u
, ignore_dB
) < 0)
1723 pa_alsa_dump(u
->pcm_handle
);
/* Spawn the real-time I/O thread running thread_func(). */
1725 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1726 pa_log("Failed to create thread.");
1730 /* Get initial mixer settings */
1731 if (data
.volume_is_set
) {
1732 if (u
->sink
->set_volume
)
1733 u
->sink
->set_volume(u
->sink
);
1735 if (u
->sink
->get_volume
)
1736 u
->sink
->get_volume(u
->sink
);
1739 if (data
.muted_is_set
) {
1740 if (u
->sink
->set_mute
)
1741 u
->sink
->set_mute(u
->sink
);
1743 if (u
->sink
->get_mute
)
1744 u
->sink
->get_mute(u
->sink
);
/* Publish the fully-initialized sink to the core. */
1747 pa_sink_put(u
->sink
);
/*
 * userdata_free() — tear down the sink: stop the I/O thread, release all
 * PCM/mixer/poll resources and free the userdata.
 *
 * Teardown order matters: unlink the sink first (no new data is routed
 * to it), then shut down the thread, then free the objects the thread
 * was using.
 *
 * NOTE(review): the extract is incomplete — numbering jumps (1758->1762,
 * 1762->1765, 1766->1769, 1791->1795, 1795->1799) indicate that the NULL
 * guards around sink/thread/rtpoll/fdlist/smoother, the final
 * "pa_xfree(u);" and the closing brace were dropped.  Restore from
 * upstream before editing logic.
 */
1758 static void userdata_free(struct userdata
*u
) {
/* Detach the sink from the core so no further rendering reaches it. */
1762 pa_sink_unlink(u
->sink
);
/* Synchronously ask the I/O thread to exit, then reap it. */
1765 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1766 pa_thread_free(u
->thread
);
1769 pa_thread_mq_done(&u
->thread_mq
);
1772 pa_sink_unref(u
->sink
);
/* Drop any partially-rendered memchunk left over from the thread. */
1774 if (u
->memchunk
.memblock
)
1775 pa_memblock_unref(u
->memchunk
.memblock
);
1777 if (u
->alsa_rtpoll_item
)
1778 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1781 pa_rtpoll_free(u
->rtpoll
);
1784 pa_alsa_fdlist_free(u
->mixer_fdl
);
1786 if (u
->mixer_handle
)
1787 snd_mixer_close(u
->mixer_handle
);
/* Drop queued samples before closing so close doesn't drain them. */
1789 if (u
->pcm_handle
) {
1790 snd_pcm_drop(u
->pcm_handle
);
1791 snd_pcm_close(u
->pcm_handle
);
1795 pa_smoother_free(u
->smoother
);
1799 pa_xfree(u
->device_name
);
1803 void pa_alsa_sink_free(pa_sink
*s
) {
1806 pa_sink_assert_ref(s
);
1807 pa_assert_se(u
= s
->userdata
);