/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
***/
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
77 pa_thread_mq thread_mq
;
80 snd_pcm_t
*pcm_handle
;
82 pa_alsa_fdlist
*mixer_fdl
;
83 snd_mixer_t
*mixer_handle
;
84 pa_alsa_path_set
*mixer_path_set
;
85 pa_alsa_path
*mixer_path
;
87 pa_cvolume hardware_volume
;
100 pa_memchunk memchunk
;
102 char *device_name
; /* name of the PCM device */
103 char *control_device
; /* name of the control device */
105 pa_bool_t use_mmap
:1, use_tsched
:1;
107 pa_bool_t first
, after_rewind
;
109 pa_rtpoll_item
*alsa_rtpoll_item
;
111 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
113 pa_smoother
*smoother
;
114 uint64_t write_count
;
115 uint64_t since_start
;
117 pa_reserve_wrapper
*reserve
;
118 pa_hook_slot
*reserve_slot
;
119 pa_reserve_monitor_wrapper
*monitor
;
120 pa_hook_slot
*monitor_slot
;
123 static void userdata_free(struct userdata
*u
);
/* Hook callback fired by the device-reservation wrapper when another
 * application asks for the audio device: suspend our sink so the device can
 * be handed over, and cancel the hand-over if suspending fails.
 * NOTE(review): extraction gaps -- asserts and the success return path are
 * not visible in this view. */
static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;
/* Tear down the device-reservation state: free the hook slot (if connected)
 * and drop our reference on the reserve wrapper.
 * NOTE(review): extraction gaps -- closing braces, the guard around the
 * unref, and the resetting of u->reserve are not visible in this view. */
static void reserve_done(struct userdata *u) {

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;

        pa_reserve_wrapper_unref(u->reserve);
/* Push the sink's human-readable description into the reservation wrapper so
 * other applications can show who currently holds the device.
 * NOTE(review): extraction gaps -- the early-return body of the guard is not
 * visible in this view. */
static void reserve_update(struct userdata *u) {
    const char *description;

    /* Nothing to announce without both a sink and an active reservation */
    if (!u->sink || !u->reserve)

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
/* Acquire the device-reservation wrapper for the given device name and hook
 * up reserve_cb so we get notified when another application wants the device.
 * Skipped in system mode.
 * NOTE(review): extraction gaps -- the declaration of rname, early returns,
 * error handling and the final return value are not visible in this view. */
static int reserve_init(struct userdata *u, const char *dname) {

    if (pa_in_system_mode())

    /* We are resuming, try to lock the device */
    if (!(rname = pa_alsa_get_reserve_name(dname)))

    u->reserve = pa_reserve_wrapper_get(u->core, rname);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
/* Hook callback fired by the reservation monitor when the device's busy
 * state changes: suspend the sink while somebody else holds the device
 * (unless we hold a reservation ourselves).
 * NOTE(review): extraction gaps -- the declaration of b and the return
 * statement are not visible in this view. */
static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {

    /* Only suspend for foreign holders: if we own the reservation, stay up */
    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
/* Tear down the reservation-monitor state: free the hook slot (if connected)
 * and drop our reference on the monitor wrapper.
 * NOTE(review): extraction gaps -- closing braces, the guard around the
 * unref, and the resetting of u->monitor are not visible in this view. */
static void monitor_done(struct userdata *u) {

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;

        pa_reserve_monitor_wrapper_unref(u->monitor);
/* Acquire the reservation *monitor* wrapper for the given device name and
 * hook up monitor_cb so we notice when another application grabs or releases
 * the device. Skipped in system mode. Mirrors reserve_init().
 * NOTE(review): extraction gaps -- the declaration of rname, early returns,
 * error handling and the final return value are not visible in this view. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {

    if (pa_in_system_mode())

    /* We are resuming, try to lock the device */
    if (!(rname = pa_alsa_get_reserve_name(dname)))

    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
/* Recompute the minimum sleep and minimum wakeup margins (in bytes) used by
 * timer-based scheduling, clamping both between one frame and half of the
 * usable hardware buffer (frame-aligned).
 * NOTE(review): extraction gaps -- asserts and the closing brace are not
 * visible in this view. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    /* Usable part of the hw buffer, and half of it aligned to a frame */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
/* Clamp the timer-scheduling watermark so it never exceeds the usable buffer
 * minus the minimum sleep margin, and never falls below the minimum wakeup
 * margin.
 * NOTE(review): extraction gaps -- the declaration of max_use, asserts and
 * the closing brace are not visible in this view. */
static void fix_tsched_watermark(struct userdata *u) {

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
/* React to a buffer underrun in timer-based scheduling mode: first try to
 * enlarge the wakeup watermark; if it is already at its ceiling, raise the
 * sink's minimum latency instead.
 * NOTE(review): extraction gaps -- closing braces, the else/return structure
 * between the two strategies, and any trailing statements are not visible in
 * this view. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Double it, but grow by at least one fixed step */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);

    /* When we reach this we're officially fucked! */
/* Compute how long the IO thread may sleep (*sleep_usec) before it must wake
 * up and refill the hardware buffer, based on the requested latency and the
 * tsched watermark.
 * NOTE(review): extraction gaps -- the declarations of usec and wm, the
 * assignment of *process_usec, and any clamping of the subtraction are not
 * visible in this view. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    /* Fall back to the full buffer time if no specific latency was requested */
    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* Sleep until the watermark would be reached */
    *sleep_usec = usec - wm;

    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
/* Attempt to recover the PCM handle from an ALSA error (e.g. underrun) via
 * snd_pcm_recover(); logs the original error and the recovery failure.
 * NOTE(review): extraction gaps -- the conditions guarding the underrun log,
 * the failure return, and the success return are not visible in this view. */
static int try_recover(struct userdata *u, const char *call, int err) {

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* EAGAIN is not an error condition that should reach this path */
    pa_assert(err != -EAGAIN);

    pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
/* Given the number of writable bytes reported by ALSA, compute how many
 * bytes are still queued for playback; detect and handle underruns (logged
 * and compensated via adjust_after_underrun) except right after startup or a
 * rewind.
 * NOTE(review): extraction gaps -- the declaration of left_to_play, the else
 * branch for the underrun case, closing braces and the return statement are
 * not visible in this view. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */
    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);

    /* Don't treat the deliberately empty buffer at startup / after a rewind
     * as an underrun */
    if (!u->first && !u->after_rewind) {

        if (pa_log_ratelimit())
            pa_log_info("Underrun!");

        adjust_after_underrun(u);
/* Fill the hardware buffer using ALSA's mmap transfer: query available
 * space, render sink audio directly into the mapped DMA area, and commit it.
 * Returns 1 if any work was done, 0 otherwise, and reports via *sleep_usec
 * how long the caller may sleep. 'polled' indicates we were woken by POLLOUT.
 * NOTE(review): extraction gaps -- declarations (n, n_bytes, r, err, p,
 * chunk, left_to_play), the surrounding for(;;) loop, several condition
 * heads, 'polled' handling, and most braces/returns are not visible in this
 * view. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = TRUE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;

    pa_sink_assert_ref(u->sink);

    hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* First we determine how many samples are missing to fill the
     * buffer up to 100% */

    if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

        if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)

    n_bytes = (size_t) n * u->frame_size;

    pa_log_debug("avail: %lu", (unsigned long) n_bytes);

    left_to_play = check_left_to_play(u, n_bytes);

    /* We won't fill up the playback buffer before at least
     * half the sleep time is over because otherwise we might
     * ask for more data from the clients then they expect. We
     * need to guarantee that clients only have to keep around
     * a single hw buffer length. */

        pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {

        pa_log_debug("Not filling up, because too early.");

    if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

        /* Spurious POLLOUT wakeup with nothing writable: blame the driver */
        char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);

        pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),

        pa_log_debug("Not filling up, because not necessary.");

        pa_log_debug("Not filling up, because already too many iterations.");

    /* Reserve the part of the buffer we deliberately keep unused */
    n_bytes -= u->hwbuf_unused;

    pa_log_debug("Filling up");

        const snd_pcm_channel_area_t *areas;
        snd_pcm_uframes_t offset, frames;
        snd_pcm_sframes_t sframes;

        frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
        /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

        if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)

        /* Make sure that if these memblocks need to be copied they will fit into one slot */
        if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
            frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

        /* Check these are multiples of 8 bit */
        pa_assert((areas[0].first & 7) == 0);
        pa_assert((areas[0].step & 7)== 0);

        /* We assume a single interleaved memory buffer */
        pa_assert((areas[0].first >> 3) == 0);
        pa_assert((areas[0].step >> 3) == u->frame_size);

        p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

        /* Wrap the mapped DMA area in a fixed memblock so the sink can
         * render straight into it, avoiding a copy */
        chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
        chunk.length = pa_memblock_get_length(chunk.memblock);

        pa_sink_render_into_full(u->sink, &chunk);
        pa_memblock_unref_fixed(chunk.memblock);

        if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)

        u->write_count += frames * u->frame_size;
        u->since_start += frames * u->frame_size;

        pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);

        if ((size_t) frames * u->frame_size >= n_bytes)

        n_bytes -= (size_t) frames * u->frame_size;

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;

    return work_done ? 1 : 0;
/* Fill the hardware buffer using classic snd_pcm_writei() transfer (non-mmap
 * fallback): render into u->memchunk and hand it to ALSA in frame-sized
 * slices. Returns 1 if any work was done, 0 otherwise; *sleep_usec tells the
 * caller how long it may sleep. 'polled' indicates we were woken by POLLOUT.
 * NOTE(review): extraction gaps -- declarations (n, n_bytes, r, p,
 * left_to_play), the surrounding for(;;) loop, several condition heads,
 * 'polled' handling, and most braces/returns are not visible in this view. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;

    pa_sink_assert_ref(u->sink);

    hw_sleep_time(u, &max_sleep_usec, &process_usec);

    if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

        if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)

    n_bytes = (size_t) n * u->frame_size;
    left_to_play = check_left_to_play(u, n_bytes);

    /* We won't fill up the playback buffer before at least
     * half the sleep time is over because otherwise we might
     * ask for more data from the clients then they expect. We
     * need to guarantee that clients only have to keep around
     * a single hw buffer length. */

        pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)

    if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

        /* Spurious POLLOUT wakeup with nothing writable: blame the driver */
        char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);

        pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),

        pa_log_debug("Not filling up, because already too many iterations.");

    /* Reserve the part of the buffer we deliberately keep unused */
    n_bytes -= u->hwbuf_unused;

        snd_pcm_sframes_t frames;

        /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

        /* Render fresh data only when the staging memchunk is drained */
        if (u->memchunk.length <= 0)
            pa_sink_render(u->sink, n_bytes, &u->memchunk);

        pa_assert(u->memchunk.length > 0);

        frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

        /* Never write more than ALSA can currently accept */
        if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
            frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

        p = pa_memblock_acquire(u->memchunk.memblock);
        frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
        pa_memblock_release(u->memchunk.memblock);

        pa_assert(frames != 0);

        if (PA_UNLIKELY(frames < 0)) {

            if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)

        /* Advance the staging chunk past what was accepted */
        u->memchunk.index += (size_t) frames * u->frame_size;
        u->memchunk.length -= (size_t) frames * u->frame_size;

        if (u->memchunk.length <= 0) {
            pa_memblock_unref(u->memchunk.memblock);
            pa_memchunk_reset(&u->memchunk);

        u->write_count += frames * u->frame_size;
        u->since_start += frames * u->frame_size;

        /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

        if ((size_t) frames * u->frame_size >= n_bytes)

        n_bytes -= (size_t) frames * u->frame_size;

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;

    return work_done ? 1 : 0;
/* Feed the time smoother with a fresh (system time, playback position) pair:
 * derive the playback position from write_count minus the current ALSA
 * delay, preferring the driver-provided htimestamp over the system clock.
 * NOTE(review): extraction gaps -- declarations (err, position), several
 * condition heads / early returns, and closing braces are not visible in
 * this view. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));

        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);

    /* Bytes actually played = bytes written - bytes still queued */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
        now1 = pa_rtclock_now();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
/* Estimate the sink's current latency: bytes written but (per the smoother's
 * interpolated clock) not yet played, plus anything still waiting in the
 * staging memchunk.
 * NOTE(review): extraction gaps -- declarations of delay and r, and the
 * return statement, are not visible in this view. */
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t now1, now2;

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    /* Negative delay can happen due to smoother interpolation; clamp to 0 */
    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
/* (Re)build the rtpoll item carrying the PCM handle's poll descriptors,
 * freeing any previous item first.
 * NOTE(review): extraction gaps -- the failure body of the final condition
 * and the return statements are not visible in this view. */
static int build_pollfd(struct userdata *u) {
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
/* Called from IO context */
/* Suspend the sink: pause the time smoother, close the PCM handle and drop
 * the associated rtpoll item.
 * NOTE(review): extraction gaps -- asserts, closing braces and the return
 * statement are not visible in this view. */
static int suspend(struct userdata *u) {
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;

    pa_log_info("Device suspended...");
/* Called from IO context */
/* Recompute and apply ALSA software parameters from the currently requested
 * latency: derive hwbuf_unused, refresh the tsched margins/watermark, set
 * avail_min, and publish the new max request size to the sink.
 * NOTE(review): extraction gaps -- declarations (latency, b, err), several
 * guard bodies, the tsched condition around the sleep-time addition, closing
 * braces and return statements are not visible in this view. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;

    /* Use the full buffer if noone asked us for anything specific */

    if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {

        pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

        b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

        /* We need at least one sample in our buffer */

        if (PA_UNLIKELY(b < u->frame_size))

        /* Everything above the requested latency stays unused */
        u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
/* Called from IO context */
/* Resume from suspend: reopen the PCM device, restore the previous hardware
 * parameters, and verify that the access mode, sample spec and fragment
 * layout match what we had before suspending. On any mismatch or error the
 * handle is closed again.
 * NOTE(review): extraction gaps -- declarations (err, ss, nfrags, b, d),
 * goto/fail labels, closing braces and return statements are not visible in
 * this view. */
static int unsuspend(struct userdata *u) {
    snd_pcm_uframes_t period_size;

    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));

    /* Try to re-establish the exact configuration we had before suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);

    if (update_sw_params(u) < 0)

    if (build_pollfd(u) < 0)

    pa_log_info("Resumed successfully...");

    /* Error path: release the half-configured handle again */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;
/* Called from IO context */
/* Sink message handler: answers latency queries from the smoother-based
 * estimate and performs suspend/resume work on state changes, then defers to
 * the generic pa_sink_process_msg().
 * NOTE(review): extraction gaps -- the outer switch statement head, the
 * declaration of r, suspend() call for PA_SINK_SUSPENDED, break statements
 * and closing braces are not visible in this view. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

        case PA_SINK_MESSAGE_GET_LATENCY: {

            r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    /* Only an open sink can be suspended */
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                case PA_SINK_RUNNING:

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)

                case PA_SINK_UNLINKED:

                case PA_SINK_INVALID_STATE:

    return pa_sink_process_msg(o, code, data, offset, chunk);
/* Called from main context */
/* State-change callback: release the device reservation when entering
 * suspend and reacquire it when leaving suspend.
 * NOTE(review): extraction gaps -- the declaration of u, the reserve_done()
 * call on the suspend branch, the error return, and the final return are not
 * visible in this view. */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)

    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
/* ALSA mixer element callback: on external value changes, re-read hardware
 * volume and mute into the sink (forced refresh, without applying).
 * NOTE(review): extraction gaps -- the early return for the REMOVE mask,
 * closing braces and the return statement are not visible in this view. */
static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE, FALSE);
        pa_sink_get_mute(u->sink, TRUE);
/* Read the hardware volume through the active mixer path into the sink. If
 * the hardware volume changed behind our back and the path supports dB, the
 * software volume is reset so the combined volume stays what the hardware
 * reports.
 * NOTE(review): extraction gaps -- the declarations of r and reset, early
 * returns and closing braces are not visible in this view. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed -- nothing to do */
    if (pa_cvolume_equal(&u->hardware_volume, &r))

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
/* Write the requested virtual volume to hardware through the active mixer
 * path. With dB support, the residual between requested and achieved
 * hardware volume is compensated in software; without it, the achieved
 * hardware volume simply becomes the new virtual volume.
 * NOTE(review): extraction gaps -- the declaration of r, early returns, the
 * else branch separating the dB/non-dB cases and closing braces are not
 * visible in this view. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
/* Read the hardware mute switch through the active mixer path into the sink.
 * NOTE(review): extraction gaps -- the declaration of b, the early return
 * and the assignment of s->muted are not visible in this view. */
static void sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
/* Push the sink's mute state to the hardware through the active mixer path.
 * NOTE(review): extraction gap -- the closing brace is not visible in this
 * view. */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
/* Switch the sink to a new device port: select the port's mixer path,
 * recompute base volume and volume-step count from the path's dB/volume
 * capabilities, and apply the port's mixer setting.
 * NOTE(review): extraction gaps -- the else branch separating the dB and
 * non-dB cases, guards around the setting selection, volume/mute refresh,
 * closing braces and the return statement are not visible in this view. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* Base volume is placed so that the path's max_dB maps to 0 dB */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));

            pa_log_info("No particular base volume set, fixing to 0 dB");

        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;

    pa_alsa_setting_select(data->setting, u->mixer_handle);
/* React to a changed latency request: recompute software parameters, and if
 * the usable buffer shrank, request a full rewind so subsequent rewinds are
 * measured against the new maximum fill level.
 * NOTE(review): extraction gaps -- the declaration of before, guards around
 * the pcm handle, and closing braces are not visible in this view. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    before = u->hwbuf_unused;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer then before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear */

    if (u->hwbuf_unused > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
/* Execute a rewind requested by the sink: limit the requested byte count to
 * what can actually be taken back from the hardware buffer (keeping the
 * watermark plus already-played data untouched), call snd_pcm_rewind(), and
 * propagate the achieved amount back to the sink.
 * NOTE(review): extraction gaps -- asserts, the else branches (limit when
 * buffer smaller than unused, nothing-to-rewind case ordering), error/return
 * statements and closing braces are not visible in this view. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));

    /* Keep the watermark plus the free space untouched by the rewind */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than we asked for */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");

            u->write_count -= out_frames * u->frame_size;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;

        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    pa_sink_process_rewind(u->sink, 0);
/* Main IO-thread loop: set up realtime scheduling and the thread message
 * queue, then repeatedly (a) process pending rewinds, (b) write data via
 * mmap_write()/unix_write(), (c) start the PCM and resume the smoother on
 * the first successful write, (d) program the wakeup timer for tsched mode,
 * and (e) sleep in pa_rtpoll_run() and react to the PCM's poll revents. On
 * abnormal exit it asks the core to unload the module and drains messages
 * until shutdown.
 * NOTE(review): extraction gaps -- the for(;;) loop head, declarations
 * (work_done, ret, err, n, cusec), the mmap/non-mmap branch condition, the
 * first-iteration condition, fail/finish labels, and many braces are not
 * visible in this view. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

        pa_log_debug("Loop");

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            pa_usec_t sleep_usec = 0;

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)

            work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);

            work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            /* pa_log_debug("work_done = %i", work_done); */

                pa_log_info("Starting playback.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

            if (u->use_tsched) {

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once */

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));

            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));

            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)

            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

    pa_log_debug("Thread shutting down");
/* Pick a name for the new sink: an explicit "sink_name"/"name" module
 * argument wins (with namereg failure on collision); otherwise synthesize
 * "alsa_output.<device>[.<mapping>]" from the device id/name and mapping.
 * NOTE(review): extraction gaps -- declarations of n and t, early returns,
 * the mapping condition and the freeing of t are not visible in this view. */
static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {

    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        /* User-chosen names must not be silently renamed on collision */
        data->namereg_fail = TRUE;

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = TRUE;

    n = device_id ? device_id : device_name;
    data->namereg_fail = FALSE;

    t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);

    t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
1368 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
1370 if (!mapping
&& !element
)
1373 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1374 pa_log_info("Failed to find a working mixer device.");
1380 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1383 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1386 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1387 pa_alsa_path_dump(u
->mixer_path
);
1390 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1393 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1395 pa_log_debug("Probed mixer paths:");
1396 pa_alsa_path_set_dump(u
->mixer_path_set
);
1403 if (u
->mixer_path_set
) {
1404 pa_alsa_path_set_free(u
->mixer_path_set
);
1405 u
->mixer_path_set
= NULL
;
1406 } else if (u
->mixer_path
) {
1407 pa_alsa_path_free(u
->mixer_path
);
1408 u
->mixer_path
= NULL
;
1411 if (u
->mixer_handle
) {
1412 snd_mixer_close(u
->mixer_handle
);
1413 u
->mixer_handle
= NULL
;
1417 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
1420 if (!u
->mixer_handle
)
1423 if (u
->sink
->active_port
) {
1424 pa_alsa_port_data
*data
;
1426 /* We have a list of supported paths, so let's activate the
1427 * one that has been chosen as active */
1429 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1430 u
->mixer_path
= data
->path
;
1432 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1435 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1439 if (!u
->mixer_path
&& u
->mixer_path_set
)
1440 u
->mixer_path
= u
->mixer_path_set
->paths
;
1442 if (u
->mixer_path
) {
1443 /* Hmm, we have only a single path, then let's activate it */
1445 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1447 if (u
->mixer_path
->settings
)
1448 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1453 if (!u
->mixer_path
->has_volume
)
1454 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1457 if (u
->mixer_path
->has_dB
) {
1458 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
1460 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1461 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1463 if (u
->mixer_path
->max_dB
> 0.0)
1464 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1466 pa_log_info("No particular base volume set, fixing to 0 dB");
1469 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1470 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1471 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1474 u
->sink
->get_volume
= sink_get_volume_cb
;
1475 u
->sink
->set_volume
= sink_set_volume_cb
;
1477 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->mixer_path
->has_dB
? PA_SINK_DECIBEL_VOLUME
: 0);
1478 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1481 if (!u
->mixer_path
->has_mute
) {
1482 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1484 u
->sink
->get_mute
= sink_get_mute_cb
;
1485 u
->sink
->set_mute
= sink_set_mute_cb
;
1486 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1487 pa_log_info("Using hardware mute control.");
1490 u
->mixer_fdl
= pa_alsa_fdlist_new();
1492 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1493 pa_log("Failed to initialize file descriptor monitoring");
1497 if (u
->mixer_path_set
)
1498 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1500 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1505 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
1507 struct userdata
*u
= NULL
;
1508 const char *dev_id
= NULL
;
1509 pa_sample_spec ss
, requested_ss
;
1511 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1512 snd_pcm_uframes_t period_frames
, tsched_frames
;
1514 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1515 pa_sink_new_data data
;
1516 pa_alsa_profile_set
*profile_set
= NULL
;
1521 ss
= m
->core
->default_sample_spec
;
1522 map
= m
->core
->default_channel_map
;
1523 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1524 pa_log("Failed to parse sample specification and channel map");
1529 frame_size
= pa_frame_size(&ss
);
1531 nfrags
= m
->core
->default_n_fragments
;
1532 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1534 frag_size
= (uint32_t) frame_size
;
1535 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1536 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1538 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1539 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1540 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1541 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1542 pa_log("Failed to parse buffer metrics");
1546 hwbuf_size
= frag_size
* nfrags
;
1547 period_frames
= frag_size
/frame_size
;
1548 tsched_frames
= tsched_size
/frame_size
;
1550 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1551 pa_log("Failed to parse mmap argument.");
1555 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1556 pa_log("Failed to parse tsched argument.");
1560 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1561 pa_log("Failed to parse ignore_dB argument.");
1565 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1566 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1570 u
= pa_xnew0(struct userdata
, 1);
1573 u
->use_mmap
= use_mmap
;
1574 u
->use_tsched
= use_tsched
;
1576 u
->rtpoll
= pa_rtpoll_new();
1577 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
1579 u
->smoother
= pa_smoother_new(
1580 DEFAULT_TSCHED_BUFFER_USEC
*2,
1581 DEFAULT_TSCHED_BUFFER_USEC
*2,
1588 dev_id
= pa_modargs_get_value(
1590 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1592 if (reserve_init(u
, dev_id
) < 0)
1595 if (reserve_monitor_init(u
, dev_id
) < 0)
1603 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1604 pa_log("device_id= not set");
1608 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1612 SND_PCM_STREAM_PLAYBACK
,
1613 &nfrags
, &period_frames
, tsched_frames
,
1618 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1620 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1623 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1627 SND_PCM_STREAM_PLAYBACK
,
1628 &nfrags
, &period_frames
, tsched_frames
,
1629 &b
, &d
, profile_set
, &mapping
)))
1635 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1636 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1639 SND_PCM_STREAM_PLAYBACK
,
1640 &nfrags
, &period_frames
, tsched_frames
,
1645 pa_assert(u
->device_name
);
1646 pa_log_info("Successfully opened device %s.", u
->device_name
);
1648 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1649 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1654 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
1656 if (use_mmap
&& !b
) {
1657 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1658 u
->use_mmap
= use_mmap
= FALSE
;
1661 if (use_tsched
&& (!b
|| !d
)) {
1662 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1663 u
->use_tsched
= use_tsched
= FALSE
;
1666 if (use_tsched
&& !pa_alsa_pcm_is_hw(u
->pcm_handle
)) {
1667 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1668 u
->use_tsched
= use_tsched
= FALSE
;
1672 pa_log_info("Successfully enabled mmap() mode.");
1675 pa_log_info("Successfully enabled timer-based scheduling mode.");
1677 /* ALSA might tweak the sample spec, so recalculate the frame size */
1678 frame_size
= pa_frame_size(&ss
);
1680 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
1682 pa_sink_new_data_init(&data
);
1683 data
.driver
= driver
;
1686 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1687 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1688 pa_sink_new_data_set_channel_map(&data
, &map
);
1690 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1691 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1692 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1693 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1694 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1697 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1698 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1701 pa_alsa_init_description(data
.proplist
);
1703 if (u
->control_device
)
1704 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1706 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1707 pa_log("Invalid properties");
1708 pa_sink_new_data_done(&data
);
1712 if (u
->mixer_path_set
)
1713 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1715 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1716 pa_sink_new_data_done(&data
);
1719 pa_log("Failed to create sink object");
1723 u
->sink
->parent
.process_msg
= sink_process_msg
;
1724 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1725 u
->sink
->set_state
= sink_set_state_cb
;
1726 u
->sink
->set_port
= sink_set_port_cb
;
1727 u
->sink
->userdata
= u
;
1729 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1730 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
1732 u
->frame_size
= frame_size
;
1733 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1734 u
->nfragments
= nfrags
;
1735 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
1736 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1737 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1739 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1740 nfrags
, (long unsigned) u
->fragment_size
,
1741 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1743 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
1744 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
1746 if (u
->use_tsched
) {
1747 u
->watermark_step
= pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC
, &u
->sink
->sample_spec
);
1749 fix_min_sleep_wakeup(u
);
1750 fix_tsched_watermark(u
);
1752 pa_sink_set_latency_range(u
->sink
,
1754 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1756 pa_log_info("Time scheduling watermark is %0.2fms",
1757 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1759 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1763 if (update_sw_params(u
) < 0)
1766 if (setup_mixer(u
, ignore_dB
) < 0)
1769 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1771 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1772 pa_log("Failed to create thread.");
1776 /* Get initial mixer settings */
1777 if (data
.volume_is_set
) {
1778 if (u
->sink
->set_volume
)
1779 u
->sink
->set_volume(u
->sink
);
1781 if (u
->sink
->get_volume
)
1782 u
->sink
->get_volume(u
->sink
);
1785 if (data
.muted_is_set
) {
1786 if (u
->sink
->set_mute
)
1787 u
->sink
->set_mute(u
->sink
);
1789 if (u
->sink
->get_mute
)
1790 u
->sink
->get_mute(u
->sink
);
1793 pa_sink_put(u
->sink
);
1796 pa_alsa_profile_set_free(profile_set
);
1806 pa_alsa_profile_set_free(profile_set
);
1811 static void userdata_free(struct userdata
*u
) {
1815 pa_sink_unlink(u
->sink
);
1818 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1819 pa_thread_free(u
->thread
);
1822 pa_thread_mq_done(&u
->thread_mq
);
1825 pa_sink_unref(u
->sink
);
1827 if (u
->memchunk
.memblock
)
1828 pa_memblock_unref(u
->memchunk
.memblock
);
1830 if (u
->alsa_rtpoll_item
)
1831 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1834 pa_rtpoll_free(u
->rtpoll
);
1836 if (u
->pcm_handle
) {
1837 snd_pcm_drop(u
->pcm_handle
);
1838 snd_pcm_close(u
->pcm_handle
);
1842 pa_alsa_fdlist_free(u
->mixer_fdl
);
1844 if (u
->mixer_path_set
)
1845 pa_alsa_path_set_free(u
->mixer_path_set
);
1846 else if (u
->mixer_path
)
1847 pa_alsa_path_free(u
->mixer_path
);
1849 if (u
->mixer_handle
)
1850 snd_mixer_close(u
->mixer_handle
);
1853 pa_smoother_free(u
->smoother
);
1858 pa_xfree(u
->device_name
);
1859 pa_xfree(u
->control_device
);
1863 void pa_alsa_sink_free(pa_sink
*s
) {
1866 pa_sink_assert_ref(s
);
1867 pa_assert_se(u
= s
->userdata
);