2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
56 #include <modules/reserve-wrap.h>
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
61 /* #define DEBUG_TIMING */
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
76 pa_thread_mq thread_mq
;
79 snd_pcm_t
*pcm_handle
;
81 pa_alsa_fdlist
*mixer_fdl
;
82 snd_mixer_t
*mixer_handle
;
83 pa_alsa_path_set
*mixer_path_set
;
84 pa_alsa_path
*mixer_path
;
86 pa_cvolume hardware_volume
;
101 char *device_name
; /* name of the PCM device */
102 char *control_device
; /* name of the control device */
104 pa_bool_t use_mmap
:1, use_tsched
:1;
106 pa_bool_t first
, after_rewind
;
108 pa_rtpoll_item
*alsa_rtpoll_item
;
110 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
112 pa_smoother
*smoother
;
113 uint64_t write_count
;
114 uint64_t since_start
;
116 pa_reserve_wrapper
*reserve
;
117 pa_hook_slot
*reserve_slot
;
118 pa_reserve_monitor_wrapper
*monitor
;
119 pa_hook_slot
*monitor_slot
;
122 static void userdata_free(struct userdata
*u
);
124 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
128 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
129 return PA_HOOK_CANCEL
;
134 static void reserve_done(struct userdata
*u
) {
137 if (u
->reserve_slot
) {
138 pa_hook_slot_free(u
->reserve_slot
);
139 u
->reserve_slot
= NULL
;
143 pa_reserve_wrapper_unref(u
->reserve
);
148 static void reserve_update(struct userdata
*u
) {
149 const char *description
;
152 if (!u
->sink
|| !u
->reserve
)
155 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
156 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
159 static int reserve_init(struct userdata
*u
, const char *dname
) {
168 if (pa_in_system_mode())
171 /* We are resuming, try to lock the device */
172 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
175 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
183 pa_assert(!u
->reserve_slot
);
184 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
189 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
195 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
197 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
201 static void monitor_done(struct userdata
*u
) {
204 if (u
->monitor_slot
) {
205 pa_hook_slot_free(u
->monitor_slot
);
206 u
->monitor_slot
= NULL
;
210 pa_reserve_monitor_wrapper_unref(u
->monitor
);
215 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
221 if (pa_in_system_mode())
224 /* We are resuming, try to lock the device */
225 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
228 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
234 pa_assert(!u
->monitor_slot
);
235 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
240 static void fix_min_sleep_wakeup(struct userdata
*u
) {
241 size_t max_use
, max_use_2
;
245 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
246 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
248 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
249 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
251 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
252 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
255 static void fix_tsched_watermark(struct userdata
*u
) {
259 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
261 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
262 u
->tsched_watermark
= max_use
- u
->min_sleep
;
264 if (u
->tsched_watermark
< u
->min_wakeup
)
265 u
->tsched_watermark
= u
->min_wakeup
;
268 static void adjust_after_underrun(struct userdata
*u
) {
269 size_t old_watermark
;
270 pa_usec_t old_min_latency
, new_min_latency
;
273 pa_assert(u
->use_tsched
);
275 /* First, just try to increase the watermark */
276 old_watermark
= u
->tsched_watermark
;
277 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_step
);
278 fix_tsched_watermark(u
);
280 if (old_watermark
!= u
->tsched_watermark
) {
281 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
282 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
286 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
287 old_min_latency
= u
->sink
->thread_info
.min_latency
;
288 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_STEP_USEC
);
289 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
291 if (old_min_latency
!= new_min_latency
) {
292 pa_log_notice("Increasing minimal latency to %0.2f ms",
293 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
295 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
299 /* When we reach this we're officialy fucked! */
302 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
305 pa_assert(sleep_usec
);
306 pa_assert(process_usec
);
310 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
312 if (usec
== (pa_usec_t
) -1)
313 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
315 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
320 *sleep_usec
= usec
- wm
;
324 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
325 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
326 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
327 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
331 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
336 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
338 pa_assert(err
!= -EAGAIN
);
341 pa_log_debug("%s: Buffer underrun!", call
);
343 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
344 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
353 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
) {
356 /* We use <= instead of < for this check here because an underrun
357 * only happens after the last sample was processed, not already when
358 * it is removed from the buffer. This is particularly important
359 * when block transfer is used. */
361 if (n_bytes
<= u
->hwbuf_size
) {
362 left_to_play
= u
->hwbuf_size
- n_bytes
;
365 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
375 if (!u
->first
&& !u
->after_rewind
) {
377 if (pa_log_ratelimit())
378 pa_log_info("Underrun!");
381 adjust_after_underrun(u
);
388 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
389 pa_bool_t work_done
= TRUE
;
390 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
395 pa_sink_assert_ref(u
->sink
);
398 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
405 /* First we determine how many samples are missing to fill the
406 * buffer up to 100% */
408 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
410 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
416 n_bytes
= (size_t) n
* u
->frame_size
;
419 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
422 left_to_play
= check_left_to_play(u
, n_bytes
);
426 /* We won't fill up the playback buffer before at least
427 * half the sleep time is over because otherwise we might
428 * ask for more data from the clients then they expect. We
429 * need to guarantee that clients only have to keep around
430 * a single hw buffer length. */
433 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
435 pa_log_debug("Not filling up, because too early.");
440 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
444 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
445 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
446 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
447 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
453 pa_log_debug("Not filling up, because not necessary.");
461 pa_log_debug("Not filling up, because already too many iterations.");
467 n_bytes
-= u
->hwbuf_unused
;
471 pa_log_debug("Filling up");
478 const snd_pcm_channel_area_t
*areas
;
479 snd_pcm_uframes_t offset
, frames
;
480 snd_pcm_sframes_t sframes
;
482 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
483 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
485 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
487 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
493 /* Make sure that if these memblocks need to be copied they will fit into one slot */
494 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
495 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
497 /* Check these are multiples of 8 bit */
498 pa_assert((areas
[0].first
& 7) == 0);
499 pa_assert((areas
[0].step
& 7)== 0);
501 /* We assume a single interleaved memory buffer */
502 pa_assert((areas
[0].first
>> 3) == 0);
503 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
505 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
507 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
508 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
511 pa_sink_render_into_full(u
->sink
, &chunk
);
512 pa_memblock_unref_fixed(chunk
.memblock
);
514 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
516 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
524 u
->write_count
+= frames
* u
->frame_size
;
525 u
->since_start
+= frames
* u
->frame_size
;
528 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
531 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
534 n_bytes
-= (size_t) frames
* u
->frame_size
;
538 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
540 if (*sleep_usec
> process_usec
)
541 *sleep_usec
-= process_usec
;
545 return work_done
? 1 : 0;
548 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
549 pa_bool_t work_done
= FALSE
;
550 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
555 pa_sink_assert_ref(u
->sink
);
558 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
565 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
567 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
573 n_bytes
= (size_t) n
* u
->frame_size
;
574 left_to_play
= check_left_to_play(u
, n_bytes
);
578 /* We won't fill up the playback buffer before at least
579 * half the sleep time is over because otherwise we might
580 * ask for more data from the clients then they expect. We
581 * need to guarantee that clients only have to keep around
582 * a single hw buffer length. */
585 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
588 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
592 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
593 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
594 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
595 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
605 pa_log_debug("Not filling up, because already too many iterations.");
611 n_bytes
-= u
->hwbuf_unused
;
615 snd_pcm_sframes_t frames
;
618 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
620 if (u
->memchunk
.length
<= 0)
621 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
623 pa_assert(u
->memchunk
.length
> 0);
625 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
627 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
628 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
630 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
631 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
632 pa_memblock_release(u
->memchunk
.memblock
);
634 pa_assert(frames
!= 0);
636 if (PA_UNLIKELY(frames
< 0)) {
638 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
644 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
645 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
647 if (u
->memchunk
.length
<= 0) {
648 pa_memblock_unref(u
->memchunk
.memblock
);
649 pa_memchunk_reset(&u
->memchunk
);
654 u
->write_count
+= frames
* u
->frame_size
;
655 u
->since_start
+= frames
* u
->frame_size
;
657 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
659 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
662 n_bytes
-= (size_t) frames
* u
->frame_size
;
666 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
668 if (*sleep_usec
> process_usec
)
669 *sleep_usec
-= process_usec
;
673 return work_done
? 1 : 0;
676 static void update_smoother(struct userdata
*u
) {
677 snd_pcm_sframes_t delay
= 0;
680 pa_usec_t now1
= 0, now2
;
681 snd_pcm_status_t
*status
;
683 snd_pcm_status_alloca(&status
);
686 pa_assert(u
->pcm_handle
);
688 /* Let's update the time smoother */
690 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
691 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
695 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
696 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
698 snd_htimestamp_t htstamp
= { 0, 0 };
699 snd_pcm_status_get_htstamp(status
, &htstamp
);
700 now1
= pa_timespec_load(&htstamp
);
703 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
705 if (PA_UNLIKELY(position
< 0))
708 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
710 now1
= pa_rtclock_now();
712 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
714 pa_smoother_put(u
->smoother
, now1
, now2
);
717 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
720 pa_usec_t now1
, now2
;
724 now1
= pa_rtclock_now();
725 now2
= pa_smoother_get(u
->smoother
, now1
);
727 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
729 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
731 if (u
->memchunk
.memblock
)
732 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
737 static int build_pollfd(struct userdata
*u
) {
739 pa_assert(u
->pcm_handle
);
741 if (u
->alsa_rtpoll_item
)
742 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
744 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
750 /* Called from IO context */
751 static int suspend(struct userdata
*u
) {
753 pa_assert(u
->pcm_handle
);
755 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
757 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
758 * take awfully long with our long buffer sizes today. */
759 snd_pcm_close(u
->pcm_handle
);
760 u
->pcm_handle
= NULL
;
762 if (u
->alsa_rtpoll_item
) {
763 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
764 u
->alsa_rtpoll_item
= NULL
;
767 pa_log_info("Device suspended...");
772 /* Called from IO context */
773 static int update_sw_params(struct userdata
*u
) {
774 snd_pcm_uframes_t avail_min
;
779 /* Use the full buffer if noone asked us for anything specific */
785 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
788 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
790 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
792 /* We need at least one sample in our buffer */
794 if (PA_UNLIKELY(b
< u
->frame_size
))
797 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
800 fix_min_sleep_wakeup(u
);
801 fix_tsched_watermark(u
);
804 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
806 /* We need at last one frame in the used part of the buffer */
807 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
810 pa_usec_t sleep_usec
, process_usec
;
812 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
813 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
816 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
818 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
819 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
823 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
828 /* Called from IO context */
829 static int unsuspend(struct userdata
*u
) {
834 snd_pcm_uframes_t period_size
;
837 pa_assert(!u
->pcm_handle
);
839 pa_log_info("Trying resume...");
841 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
842 /*SND_PCM_NONBLOCK|*/
843 SND_PCM_NO_AUTO_RESAMPLE
|
844 SND_PCM_NO_AUTO_CHANNELS
|
845 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
846 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
850 ss
= u
->sink
->sample_spec
;
851 nfrags
= u
->nfragments
;
852 period_size
= u
->fragment_size
/ u
->frame_size
;
856 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
857 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
861 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
862 pa_log_warn("Resume failed, couldn't get original access mode.");
866 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
867 pa_log_warn("Resume failed, couldn't restore original sample settings.");
871 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
872 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
873 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
874 (unsigned long) nfrags
, period_size
* u
->frame_size
);
878 if (update_sw_params(u
) < 0)
881 if (build_pollfd(u
) < 0)
887 pa_log_info("Resumed successfully...");
893 snd_pcm_close(u
->pcm_handle
);
894 u
->pcm_handle
= NULL
;
900 /* Called from IO context */
901 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
902 struct userdata
*u
= PA_SINK(o
)->userdata
;
906 case PA_SINK_MESSAGE_GET_LATENCY
: {
910 r
= sink_get_latency(u
);
912 *((pa_usec_t
*) data
) = r
;
917 case PA_SINK_MESSAGE_SET_STATE
:
919 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
921 case PA_SINK_SUSPENDED
:
922 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
930 case PA_SINK_RUNNING
:
932 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
933 if (build_pollfd(u
) < 0)
937 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
938 if (unsuspend(u
) < 0)
944 case PA_SINK_UNLINKED
:
946 case PA_SINK_INVALID_STATE
:
953 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
956 /* Called from main context */
957 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
958 pa_sink_state_t old_state
;
961 pa_sink_assert_ref(s
);
962 pa_assert_se(u
= s
->userdata
);
964 old_state
= pa_sink_get_state(u
->sink
);
966 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
968 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
969 if (reserve_init(u
, u
->device_name
) < 0)
975 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
976 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
979 pa_assert(u
->mixer_handle
);
981 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
984 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
985 pa_sink_get_volume(u
->sink
, TRUE
, FALSE
);
986 pa_sink_get_mute(u
->sink
, TRUE
);
992 static void sink_get_volume_cb(pa_sink
*s
) {
993 struct userdata
*u
= s
->userdata
;
995 char t
[PA_CVOLUME_SNPRINT_MAX
];
998 pa_assert(u
->mixer_path
);
999 pa_assert(u
->mixer_handle
);
1001 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1004 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1005 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1007 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1009 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1012 s
->virtual_volume
= u
->hardware_volume
= r
;
1014 if (u
->mixer_path
->has_dB
) {
1017 /* Hmm, so the hardware volume changed, let's reset our software volume */
1018 pa_cvolume_reset(&reset
, s
->sample_spec
.channels
);
1019 pa_sink_set_soft_volume(s
, &reset
);
1023 static void sink_set_volume_cb(pa_sink
*s
) {
1024 struct userdata
*u
= s
->userdata
;
1026 char t
[PA_CVOLUME_SNPRINT_MAX
];
1029 pa_assert(u
->mixer_path
);
1030 pa_assert(u
->mixer_handle
);
1032 /* Shift up by the base volume */
1033 pa_sw_cvolume_divide_scalar(&r
, &s
->virtual_volume
, s
->base_volume
);
1035 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1038 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1039 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1041 u
->hardware_volume
= r
;
1043 if (u
->mixer_path
->has_dB
) {
1045 /* Match exactly what the user requested by software */
1046 pa_sw_cvolume_divide(&s
->soft_volume
, &s
->virtual_volume
, &u
->hardware_volume
);
1048 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->virtual_volume
));
1049 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1050 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->soft_volume
));
1053 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1055 /* We can't match exactly what the user requested, hence let's
1056 * at least tell the user about it */
1058 s
->virtual_volume
= r
;
1062 static void sink_get_mute_cb(pa_sink
*s
) {
1063 struct userdata
*u
= s
->userdata
;
1067 pa_assert(u
->mixer_path
);
1068 pa_assert(u
->mixer_handle
);
1070 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1076 static void sink_set_mute_cb(pa_sink
*s
) {
1077 struct userdata
*u
= s
->userdata
;
1080 pa_assert(u
->mixer_path
);
1081 pa_assert(u
->mixer_handle
);
1083 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1086 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1087 struct userdata
*u
= s
->userdata
;
1088 pa_alsa_port_data
*data
;
1092 pa_assert(u
->mixer_handle
);
1094 data
= PA_DEVICE_PORT_DATA(p
);
1096 pa_assert_se(u
->mixer_path
= data
->path
);
1097 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1099 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1100 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1101 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1103 if (u
->mixer_path
->max_dB
> 0.0)
1104 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1106 pa_log_info("No particular base volume set, fixing to 0 dB");
1108 s
->base_volume
= PA_VOLUME_NORM
;
1109 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1113 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1123 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1124 struct userdata
*u
= s
->userdata
;
1131 before
= u
->hwbuf_unused
;
1132 update_sw_params(u
);
1134 /* Let's check whether we now use only a smaller part of the
1135 buffer then before. If so, we need to make sure that subsequent
1136 rewinds are relative to the new maximum fill level and not to the
1137 current fill level. Thus, let's do a full rewind once, to clear
1140 if (u
->hwbuf_unused
> before
) {
1141 pa_log_debug("Requesting rewind due to latency change.");
1142 pa_sink_request_rewind(s
, (size_t) -1);
1146 static int process_rewind(struct userdata
*u
) {
1147 snd_pcm_sframes_t unused
;
1148 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1151 /* Figure out how much we shall rewind and reset the counter */
1152 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1154 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1156 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1157 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1161 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1163 if (u
->hwbuf_size
> unused_nbytes
)
1164 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1168 if (rewind_nbytes
> limit_nbytes
)
1169 rewind_nbytes
= limit_nbytes
;
1171 if (rewind_nbytes
> 0) {
1172 snd_pcm_sframes_t in_frames
, out_frames
;
1174 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1176 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1177 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1178 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1179 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1182 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1184 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1186 if (rewind_nbytes
<= 0)
1187 pa_log_info("Tried rewind, but was apparently not possible.");
1189 u
->write_count
-= out_frames
* u
->frame_size
;
1190 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1191 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1193 u
->after_rewind
= TRUE
;
1197 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1199 pa_sink_process_rewind(u
->sink
, 0);
1203 static void thread_func(void *userdata
) {
1204 struct userdata
*u
= userdata
;
1205 unsigned short revents
= 0;
1209 pa_log_debug("Thread starting up");
1211 if (u
->core
->realtime_scheduling
)
1212 pa_make_realtime(u
->core
->realtime_priority
);
1214 pa_thread_mq_install(&u
->thread_mq
);
1215 pa_rtpoll_install(u
->rtpoll
);
1221 pa_log_debug("Loop");
1224 /* Render some data and write it to the dsp */
1225 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1227 pa_usec_t sleep_usec
= 0;
1229 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1230 if (process_rewind(u
) < 0)
1234 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
);
1236 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
);
1241 /* pa_log_debug("work_done = %i", work_done); */
1246 pa_log_info("Starting playback.");
1247 snd_pcm_start(u
->pcm_handle
);
1249 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1255 if (u
->use_tsched
) {
1258 if (u
->since_start
<= u
->hwbuf_size
) {
1260 /* USB devices on ALSA seem to hit a buffer
1261 * underrun during the first iterations much
1262 * quicker then we calculate here, probably due to
1263 * the transport latency. To accommodate for that
1264 * we artificially decrease the sleep time until
1265 * we have filled the buffer at least once
1268 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1272 /* OK, the playback buffer is now full, let's
1273 * calculate when to wake up next */
1274 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1276 /* Convert from the sound card time domain to the
1277 * system time domain */
1278 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1280 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1282 /* We don't trust the conversion, so we wake up whatever comes first */
1283 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1287 u
->after_rewind
= FALSE
;
1289 } else if (u
->use_tsched
)
1291 /* OK, we're in an invalid state, let's disable our timers */
1292 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1294 /* Hmm, nothing to do. Let's sleep */
1295 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1301 /* Tell ALSA about this and process its response */
1302 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1303 struct pollfd
*pollfd
;
1307 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
1309 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1310 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
1314 if (revents
& ~POLLOUT
) {
1315 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1320 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1321 pa_log_debug("Wakeup from ALSA!");
1328 /* If this was no regular exit from the loop we have to continue
1329 * processing messages until we received PA_MESSAGE_SHUTDOWN */
1330 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1331 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1334 pa_log_debug("Thread shutting down");
/*
 * set_sink_name(): choose the registration name for a new ALSA sink.
 *
 * Name priority, as visible below:
 *   1. explicit "sink_name=" module argument (used verbatim),
 *   2. otherwise derived from device_id (falling back to device_name),
 *      formatted as "alsa_output.<dev>.<mapping-name>" when a mapping is
 *      present, else "alsa_output.<dev>".
 * namereg_fail is set to TRUE for user-chosen names and FALSE for derived
 * ones -- presumably so a derived name may be auto-uniquified on collision
 * while a user-given one fails hard; confirm against pa_sink_new_data docs.
 *
 * NOTE(review): this chunk is an incomplete extraction. The declarations of
 * `n` and `t`, the early `return`, the `else { ... }` wrappers, the
 * `if (mapping)` test, the trailing pa_xfree(t) and the closing braces are
 * missing from the visible text (original line numbers jump 1347->1351,
 * 1355->1359, 1363->end). All visible tokens are kept verbatim.
 */
1337 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1343 pa_assert(device_name);
/* Case 1: explicit sink_name= wins; take it as-is. */
1345 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1346 pa_sink_new_data_set_name(data, n);
1347 data->namereg_fail = TRUE;
/* Case 2: a generic name= argument only toggles the collision policy;
 * the actual name is still derived from the device below. */
1351 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1352 data->namereg_fail = TRUE;
/* Derived name: prefer the stable device_id over the raw device string. */
1354 n = device_id ? device_id : device_name;
1355 data->namereg_fail = FALSE;
/* With a mapping the name carries the mapping suffix; t is heap-allocated
 * by pa_sprintf_malloc (freed after registration -- pa_xfree not visible
 * in this extraction). */
1359 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1361 t = pa_sprintf_malloc("alsa_output.%s", n);
1363 pa_sink_new_data_set_name(data, t);
/*
 * find_mixer(): locate and probe an ALSA mixer path (or path set) for the
 * sink's PCM device, filling u->mixer_handle and either u->mixer_path
 * (explicit "control=" element given) or u->mixer_path_set (derived from
 * the profile mapping).
 *
 * On any probe failure the visible tail tears everything down again and
 * NULLs the userdata fields so later code can treat "no mixer" uniformly.
 *
 * NOTE(review): incomplete extraction -- early `return`s, `goto fail`/
 * `fail:` label, the `if (element)`/`else if (mapping)` structure and
 * closing braces are missing from view (line numbers jump 1373->1379,
 * 1386->1389, 1395->1402). Visible tokens kept verbatim.
 */
1367 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
/* Nothing to look for: neither a mapping nor an explicit element. */
1369 if (!mapping && !element)
/* Open the mixer device that belongs to the already-open PCM; also
 * records the control device name in u->control_device. */
1372 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1373 pa_log_info("Failed to find a working mixer device.");
/* Explicit element: synthesize a single output path around it... */
1379 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
/* ...and probe it against the opened mixer (ignore_dB disables dB data). */
1382 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1385 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1386 pa_alsa_path_dump(u->mixer_path);
/* Mapping given instead: build and probe a whole set of candidate paths. */
1389 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1392 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1394 pa_log_debug("Probed mixer paths:");
1395 pa_alsa_path_set_dump(u->mixer_path_set);
/* Failure cleanup (presumably under a fail: label -- label itself not
 * visible): free whichever of path-set/path was created, close the mixer,
 * and NULL the pointers so callers see a consistent "no mixer" state. */
1402 if (u->mixer_path_set) {
1403 pa_alsa_path_set_free(u->mixer_path_set);
1404 u->mixer_path_set = NULL;
1405 } else if (u->mixer_path) {
1406 pa_alsa_path_free(u->mixer_path);
1407 u->mixer_path = NULL;
1410 if (u->mixer_handle) {
1411 snd_mixer_close(u->mixer_handle);
1412 u->mixer_handle = NULL;
/*
 * setup_mixer(): wire the probed mixer path into the sink -- select the
 * active path/setting, publish hardware volume/mute capabilities and
 * ranges on u->sink, install the volume/mute callbacks, and start
 * monitoring mixer events on the main loop.
 *
 * Returns int (0/-1 by PulseAudio convention); the actual `return`
 * statements are not visible in this extraction.
 *
 * NOTE(review): incomplete extraction -- early returns, several `else`/
 * `} else {` lines (e.g. between the has_dB branch at 1456 and the raw
 * volume branch at 1468, and before the hw-mute block at 1483) and
 * closing braces are missing from view. Visible tokens kept verbatim.
 */
1416 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
/* No mixer was found by find_mixer(); nothing to set up. */
1419 if (!u->mixer_handle)
1422 if (u->sink->active_port) {
1423 pa_alsa_port_data *data;
1425 /* We have a list of supported paths, so let's activate the
1426 * one that has been chosen as active */
1428 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1429 u->mixer_path = data->path;
1431 pa_alsa_path_select(data->path, u->mixer_handle);
/* data->setting selection presumably guarded by `if (data->setting)` --
 * guard line not visible here. */
1434 pa_alsa_setting_select(data->setting, u->mixer_handle);
/* No port chosen: fall back to the first path in the probed set. */
1438 if (!u->mixer_path && u->mixer_path_set)
1439 u->mixer_path = u->mixer_path_set->paths;
1441 if (u->mixer_path) {
1442 /* Hmm, we have only a single path, then let's activate it */
1444 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1446 if (u->mixer_path->settings)
1447 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
/* --- Volume capability publication --- */
1452 if (!u->mixer_path->has_volume)
1453 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1456 if (u->mixer_path->has_dB) {
1457 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
/* base_volume maps 0 dB to the PA scale: attenuate by max_dB so that
 * hardware 0 dB corresponds to PA_VOLUME_NORM when max_dB > 0. */
1459 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1460 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1462 if (u->mixer_path->max_dB > 0.0)
1463 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1465 pa_log_info("No particular base volume set, fixing to 0 dB");
/* Raw (non-dB) volume branch: steps come straight from the ALSA range. */
1468 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1469 u->sink->base_volume = PA_VOLUME_NORM;
1470 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1473 u->sink->get_volume = sink_get_volume_cb;
1474 u->sink->set_volume = sink_set_volume_cb;
1476 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1477 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
/* --- Mute capability publication (hw-mute branch's `else` not visible) --- */
1480 if (!u->mixer_path->has_mute) {
1481 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1483 u->sink->get_mute = sink_get_mute_cb;
1484 u->sink->set_mute = sink_set_mute_cb;
1485 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1486 pa_log_info("Using hardware mute control.");
/* Watch the mixer's file descriptors on the core main loop so external
 * volume/mute changes are picked up asynchronously. */
1489 u->mixer_fdl = pa_alsa_fdlist_new();
1491 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1492 pa_log("Failed to initialize file descriptor monitoring");
/* Route mixer element events to mixer_callback for whichever structure
 * (path set vs. single path) is in use. */
1496 if (u->mixer_path_set)
1497 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1499 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
/*
 * pa_alsa_sink_new(): module entry point that constructs a complete ALSA
 * playback sink. Visible phases:
 *   1. parse sample spec / channel map / buffer metrics / boolean args
 *      from the module arguments,
 *   2. allocate the userdata, rtpoll, thread message queue and smoother,
 *      and take the device (and monitor) reservation,
 *   3. open the PCM -- by explicit mapping, by device_id with automatic
 *      profile probing, or by raw device string,
 *   4. downgrade mmap/tsched if the device cannot support them,
 *   5. probe the mixer, build pa_sink_new_data (name, proplist, ports)
 *      and create the sink object,
 *   6. compute buffer/watermark sizes, publish latency bounds, configure
 *      sw params, set up the mixer, spawn the I/O thread, apply initial
 *      volume/mute, and pa_sink_put() the sink.
 *
 * NOTE(review): incomplete extraction. Many interior lines are missing
 * (original numbering jumps throughout): the declarations of `map`,
 * `frame_size` and error codes, all `goto fail` statements, the `fail:`
 * cleanup label, `return u->sink;` / `return NULL;`, smoother parameters
 * at 1581-1586, and the open-call argument lines 1608-1610 / 1623-1625 /
 * 1636-1637. Visible tokens are kept verbatim; do not assume the elided
 * control flow from these comments alone.
 */
1504 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1506 struct userdata *u = NULL;
1507 const char *dev_id = NULL;
1508 pa_sample_spec ss, requested_ss;
1510 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1511 snd_pcm_uframes_t period_frames, tsched_frames;
1513 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1514 pa_sink_new_data data;
1515 pa_alsa_profile_set *profile_set = NULL;
/* Phase 1: sample spec and channel map, seeded from the core defaults. */
1520 ss = m->core->default_sample_spec;
1521 map = m->core->default_channel_map;
1522 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1523 pa_log("Failed to parse sample specification and channel map");
1528 frame_size = pa_frame_size(&ss);
/* Buffer metric defaults: classic IRQ scheduling uses nfrags * frag_size,
 * timer-based scheduling uses tsched_size with a refill watermark. */
1530 nfrags = m->core->default_n_fragments;
1531 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
/* Clamp fallback (guard condition not visible): at least one frame. */
1533 frag_size = (uint32_t) frame_size;
1534 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1535 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1537 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1538 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1539 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1540 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1541 pa_log("Failed to parse buffer metrics");
1545 hwbuf_size = frag_size * nfrags;
1546 period_frames = frag_size/frame_size;
1547 tsched_frames = tsched_size/frame_size;
1549 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1550 pa_log("Failed to parse mmap argument.");
1554 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1555 pa_log("Failed to parse tsched argument.");
1559 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1560 pa_log("Failed to parse ignore_dB argument.");
/* Timer scheduling is pointless without high-resolution kernel timers. */
1564 if (use_tsched && !pa_rtclock_hrtimer()) {
1565 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
/* Phase 2: allocate and pre-wire the userdata. */
1569 u = pa_xnew0(struct userdata, 1);
1572 u->use_mmap = use_mmap;
1573 u->use_tsched = use_tsched;
1575 u->rtpoll = pa_rtpoll_new();
1576 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
/* Smoother for sound-card-clock -> system-clock translation; remaining
 * constructor arguments (history/adjust times etc.) not visible here. */
1578 u->smoother = pa_smoother_new(
1579 DEFAULT_TSCHED_BUFFER_USEC*2,
1580 DEFAULT_TSCHED_BUFFER_USEC*2,
/* Device reservation: grab (or watch) the audio-device reservation so we
 * cooperate with other audio daemons on the same card. */
1587 dev_id = pa_modargs_get_value(
1589 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1591 if (reserve_init(u, dev_id) < 0)
1594 if (reserve_monitor_init(u, dev_id) < 0)
/* Phase 3: open the PCM. Branch A -- a mapping was handed in, so
 * device_id= is mandatory and the mapping is opened directly. */
1602 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1603 pa_log("device_id= not set");
1607 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1611 SND_PCM_STREAM_PLAYBACK,
1612 &nfrags, &period_frames, tsched_frames,
/* Branch B -- device_id= without a mapping: probe a fresh profile set and
 * let the helper pick a mapping automatically (returned via &mapping). */
1617 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1619 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1622 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1626 SND_PCM_STREAM_PLAYBACK,
1627 &nfrags, &period_frames, tsched_frames,
1628 &b, &d, profile_set, &mapping)))
/* Branch C -- plain device= string (or the default device). */
1634 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1635 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1638 SND_PCM_STREAM_PLAYBACK,
1639 &nfrags, &period_frames, tsched_frames,
1644 pa_assert(u->device_name);
1645 pa_log_info("Successfully opened device %s.", u->device_name);
1647 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1648 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1653 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
/* Phase 4: downgrade features the device turned out not to support.
 * b = mmap usable, d = tsched usable -- set by the open helpers above. */
1655 if (use_mmap && !b) {
1656 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1657 u->use_mmap = use_mmap = FALSE;
1660 if (use_tsched && (!b || !d)) {
1661 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1662 u->use_tsched = use_tsched = FALSE;
1665 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1666 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1667 u->use_tsched = use_tsched = FALSE;
1671 pa_log_info("Successfully enabled mmap() mode.");
1674 pa_log_info("Successfully enabled timer-based scheduling mode.");
1676 /* ALSA might tweak the sample spec, so recalculate the frame size */
1677 frame_size = pa_frame_size(&ss);
1679 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
/* Phase 5: assemble pa_sink_new_data -- name, properties, ports. */
1681 pa_sink_new_data_init(&data);
1682 data.driver = driver;
1685 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1686 pa_sink_new_data_set_sample_spec(&data, &ss);
1687 pa_sink_new_data_set_channel_map(&data, &map);
1689 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1690 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1691 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1692 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1693 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1696 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1697 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1700 pa_alsa_init_description(data.proplist);
1702 if (u->control_device)
1703 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
/* User-supplied properties override everything set so far. */
1705 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1706 pa_log("Invalid properties");
1707 pa_sink_new_data_done(&data);
1711 if (u->mixer_path_set)
1712 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1714 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1715 pa_sink_new_data_done(&data);
1718 pa_log("Failed to create sink object");
/* Install the sink's vtable callbacks and link it to our thread/rtpoll. */
1722 u->sink->parent.process_msg = sink_process_msg;
1723 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1724 u->sink->set_state = sink_set_state_cb;
1725 u->sink->set_port = sink_set_port_cb;
1726 u->sink->userdata = u;
1728 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1729 pa_sink_set_rtpoll(u->sink, u->rtpoll);
/* Phase 6: final buffer geometry. The watermark is converted through the
 * requested spec into the spec the sink actually got from ALSA. */
1731 u->frame_size = frame_size;
1732 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1733 u->nfragments = nfrags;
1734 u->hwbuf_size = u->fragment_size * nfrags;
1735 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1736 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1738 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1739 nfrags, (long unsigned) u->fragment_size,
1740 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1742 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1743 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1745 if (u->use_tsched) {
1746 u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);
1748 fix_min_sleep_wakeup(u);
1749 fix_tsched_watermark(u);
/* Dynamic latency range for tsched mode (lower bound line not visible);
 * fixed latency (whole hw buffer) otherwise. */
1751 pa_sink_set_latency_range(u->sink,
1753 pa_bytes_to_usec(u->hwbuf_size, &ss));
1755 pa_log_info("Time scheduling watermark is %0.2fms",
1756 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1758 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1762 if (update_sw_params(u) < 0)
1765 if (setup_mixer(u, ignore_dB) < 0)
1768 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1770 if (!(u->thread = pa_thread_new(thread_func, u))) {
1771 pa_log("Failed to create thread.");
1775 /* Get initial mixer settings */
1776 if (data.volume_is_set) {
1777 if (u->sink->set_volume)
1778 u->sink->set_volume(u->sink);
1780 if (u->sink->get_volume)
1781 u->sink->get_volume(u->sink);
1784 if (data.muted_is_set) {
1785 if (u->sink->set_mute)
1786 u->sink->set_mute(u->sink);
1788 if (u->sink->get_mute)
1789 u->sink->get_mute(u->sink);
1792 pa_sink_put(u->sink);
/* Success path frees the temporary profile set; the second free below is
 * presumably under the fail: cleanup label (label not visible). */
1795 pa_alsa_profile_set_free(profile_set);
1805 pa_alsa_profile_set_free(profile_set);
/*
 * userdata_free(): tear down everything pa_alsa_sink_new() built, in
 * reverse dependency order -- unlink the sink, shut down and join the I/O
 * thread, release queues/poll objects, drop+close the PCM, free the mixer
 * machinery, and finally free the strings and the struct itself.
 *
 * NOTE(review): incomplete extraction -- the `if (u->sink)` / `if
 * (u->thread)` / `if (u->rtpoll)` / `if (u->mixer_fdl)` / `if
 * (u->smoother)` guards, reserve/reserve-monitor teardown, the final
 * pa_xfree(u) and closing braces are missing from view (numbering jumps
 * 1810->1814, 1818->1821, 1852->1857). Visible tokens kept verbatim.
 */
1810 static void userdata_free(struct userdata *u) {
/* Detach the sink from the core first so no new work arrives. */
1814 pa_sink_unlink(u->sink);
/* Synchronously ask the I/O thread to exit, then join it. */
1817 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1818 pa_thread_free(u->thread);
1821 pa_thread_mq_done(&u->thread_mq);
1824 pa_sink_unref(u->sink);
/* Pending playback memchunk, if any, still holds a memblock reference. */
1826 if (u->memchunk.memblock)
1827 pa_memblock_unref(u->memchunk.memblock);
1829 if (u->alsa_rtpoll_item)
1830 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1833 pa_rtpoll_free(u->rtpoll);
/* Drop queued samples before closing so close doesn't block on drain. */
1835 if (u->pcm_handle) {
1836 snd_pcm_drop(u->pcm_handle);
1837 snd_pcm_close(u->pcm_handle);
1841 pa_alsa_fdlist_free(u->mixer_fdl);
/* A path set owns its paths; only free the single path when there is no
 * set (mirrors the ownership split set up in find_mixer()). */
1843 if (u->mixer_path_set)
1844 pa_alsa_path_set_free(u->mixer_path_set);
1845 else if (u->mixer_path)
1846 pa_alsa_path_free(u->mixer_path);
1848 if (u->mixer_handle)
1849 snd_mixer_close(u->mixer_handle);
1852 pa_smoother_free(u->smoother);
1857 pa_xfree(u->device_name);
1858 pa_xfree(u->control_device);
1862 void pa_alsa_sink_free(pa_sink
*s
) {
1865 pa_sink_assert_ref(s
);
1866 pa_assert_se(u
= s
->userdata
);