2 This file is part of PulseAudio.
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
29 #include <asoundlib.h>
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
57 #include <modules/reserve-wrap.h>
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
62 /* #define DEBUG_TIMING */
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
71 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
72 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update inteval */
74 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
82 pa_thread_mq thread_mq
;
85 snd_pcm_t
*pcm_handle
;
87 pa_alsa_fdlist
*mixer_fdl
;
88 snd_mixer_t
*mixer_handle
;
89 pa_alsa_path_set
*mixer_path_set
;
90 pa_alsa_path
*mixer_path
;
92 pa_cvolume hardware_volume
;
105 pa_memchunk memchunk
;
107 char *device_name
; /* name of the PCM device */
108 char *control_device
; /* name of the control device */
110 pa_bool_t use_mmap
:1, use_tsched
:1;
112 pa_bool_t first
, after_rewind
;
114 pa_rtpoll_item
*alsa_rtpoll_item
;
116 snd_mixer_selem_channel_id_t mixer_map
[SND_MIXER_SCHN_LAST
];
118 pa_smoother
*smoother
;
119 uint64_t write_count
;
120 uint64_t since_start
;
121 pa_usec_t smoother_interval
;
122 pa_usec_t last_smoother_update
;
124 pa_reserve_wrapper
*reserve
;
125 pa_hook_slot
*reserve_slot
;
126 pa_reserve_monitor_wrapper
*monitor
;
127 pa_hook_slot
*monitor_slot
;
130 static void userdata_free(struct userdata
*u
);
132 static pa_hook_result_t
reserve_cb(pa_reserve_wrapper
*r
, void *forced
, struct userdata
*u
) {
136 if (pa_sink_suspend(u
->sink
, TRUE
, PA_SUSPEND_APPLICATION
) < 0)
137 return PA_HOOK_CANCEL
;
142 static void reserve_done(struct userdata
*u
) {
145 if (u
->reserve_slot
) {
146 pa_hook_slot_free(u
->reserve_slot
);
147 u
->reserve_slot
= NULL
;
151 pa_reserve_wrapper_unref(u
->reserve
);
156 static void reserve_update(struct userdata
*u
) {
157 const char *description
;
160 if (!u
->sink
|| !u
->reserve
)
163 if ((description
= pa_proplist_gets(u
->sink
->proplist
, PA_PROP_DEVICE_DESCRIPTION
)))
164 pa_reserve_wrapper_set_application_device_name(u
->reserve
, description
);
167 static int reserve_init(struct userdata
*u
, const char *dname
) {
176 if (pa_in_system_mode())
179 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
182 /* We are resuming, try to lock the device */
183 u
->reserve
= pa_reserve_wrapper_get(u
->core
, rname
);
191 pa_assert(!u
->reserve_slot
);
192 u
->reserve_slot
= pa_hook_connect(pa_reserve_wrapper_hook(u
->reserve
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) reserve_cb
, u
);
197 static pa_hook_result_t
monitor_cb(pa_reserve_monitor_wrapper
*w
, void* busy
, struct userdata
*u
) {
203 b
= PA_PTR_TO_UINT(busy
) && !u
->reserve
;
205 pa_sink_suspend(u
->sink
, b
, PA_SUSPEND_APPLICATION
);
209 static void monitor_done(struct userdata
*u
) {
212 if (u
->monitor_slot
) {
213 pa_hook_slot_free(u
->monitor_slot
);
214 u
->monitor_slot
= NULL
;
218 pa_reserve_monitor_wrapper_unref(u
->monitor
);
223 static int reserve_monitor_init(struct userdata
*u
, const char *dname
) {
229 if (pa_in_system_mode())
232 if (!(rname
= pa_alsa_get_reserve_name(dname
)))
235 u
->monitor
= pa_reserve_monitor_wrapper_get(u
->core
, rname
);
241 pa_assert(!u
->monitor_slot
);
242 u
->monitor_slot
= pa_hook_connect(pa_reserve_monitor_wrapper_hook(u
->monitor
), PA_HOOK_NORMAL
, (pa_hook_cb_t
) monitor_cb
, u
);
247 static void fix_min_sleep_wakeup(struct userdata
*u
) {
248 size_t max_use
, max_use_2
;
252 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
253 max_use_2
= pa_frame_align(max_use
/2, &u
->sink
->sample_spec
);
255 u
->min_sleep
= pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC
, &u
->sink
->sample_spec
);
256 u
->min_sleep
= PA_CLAMP(u
->min_sleep
, u
->frame_size
, max_use_2
);
258 u
->min_wakeup
= pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC
, &u
->sink
->sample_spec
);
259 u
->min_wakeup
= PA_CLAMP(u
->min_wakeup
, u
->frame_size
, max_use_2
);
262 static void fix_tsched_watermark(struct userdata
*u
) {
266 max_use
= u
->hwbuf_size
- u
->hwbuf_unused
;
268 if (u
->tsched_watermark
> max_use
- u
->min_sleep
)
269 u
->tsched_watermark
= max_use
- u
->min_sleep
;
271 if (u
->tsched_watermark
< u
->min_wakeup
)
272 u
->tsched_watermark
= u
->min_wakeup
;
275 static void adjust_after_underrun(struct userdata
*u
) {
276 size_t old_watermark
;
277 pa_usec_t old_min_latency
, new_min_latency
;
280 pa_assert(u
->use_tsched
);
282 /* First, just try to increase the watermark */
283 old_watermark
= u
->tsched_watermark
;
284 u
->tsched_watermark
= PA_MIN(u
->tsched_watermark
* 2, u
->tsched_watermark
+ u
->watermark_step
);
285 fix_tsched_watermark(u
);
287 if (old_watermark
!= u
->tsched_watermark
) {
288 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
289 (double) pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
293 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
294 old_min_latency
= u
->sink
->thread_info
.min_latency
;
295 new_min_latency
= PA_MIN(old_min_latency
* 2, old_min_latency
+ TSCHED_WATERMARK_STEP_USEC
);
296 new_min_latency
= PA_MIN(new_min_latency
, u
->sink
->thread_info
.max_latency
);
298 if (old_min_latency
!= new_min_latency
) {
299 pa_log_notice("Increasing minimal latency to %0.2f ms",
300 (double) new_min_latency
/ PA_USEC_PER_MSEC
);
302 pa_sink_set_latency_range_within_thread(u
->sink
, new_min_latency
, u
->sink
->thread_info
.max_latency
);
306 /* When we reach this we're officialy fucked! */
309 static void hw_sleep_time(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_usec_t
*process_usec
) {
312 pa_assert(sleep_usec
);
313 pa_assert(process_usec
);
317 usec
= pa_sink_get_requested_latency_within_thread(u
->sink
);
319 if (usec
== (pa_usec_t
) -1)
320 usec
= pa_bytes_to_usec(u
->hwbuf_size
, &u
->sink
->sample_spec
);
322 wm
= pa_bytes_to_usec(u
->tsched_watermark
, &u
->sink
->sample_spec
);
327 *sleep_usec
= usec
- wm
;
331 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
332 (unsigned long) (usec
/ PA_USEC_PER_MSEC
),
333 (unsigned long) (*sleep_usec
/ PA_USEC_PER_MSEC
),
334 (unsigned long) (*process_usec
/ PA_USEC_PER_MSEC
));
338 static int try_recover(struct userdata
*u
, const char *call
, int err
) {
343 pa_log_debug("%s: %s", call
, pa_alsa_strerror(err
));
345 pa_assert(err
!= -EAGAIN
);
348 pa_log_debug("%s: Buffer underrun!", call
);
350 if (err
== -ESTRPIPE
)
351 pa_log_debug("%s: System suspended!", call
);
353 if ((err
= snd_pcm_recover(u
->pcm_handle
, err
, 1)) < 0) {
354 pa_log("%s: %s", call
, pa_alsa_strerror(err
));
363 static size_t check_left_to_play(struct userdata
*u
, size_t n_bytes
) {
366 /* We use <= instead of < for this check here because an underrun
367 * only happens after the last sample was processed, not already when
368 * it is removed from the buffer. This is particularly important
369 * when block transfer is used. */
371 if (n_bytes
<= u
->hwbuf_size
) {
372 left_to_play
= u
->hwbuf_size
- n_bytes
;
375 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) / PA_USEC_PER_MSEC
);
385 if (!u
->first
&& !u
->after_rewind
) {
387 if (pa_log_ratelimit())
388 pa_log_info("Underrun!");
391 adjust_after_underrun(u
);
398 static int mmap_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
399 pa_bool_t work_done
= TRUE
;
400 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
405 pa_sink_assert_ref(u
->sink
);
408 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
414 pa_bool_t after_avail
= TRUE
;
416 /* First we determine how many samples are missing to fill the
417 * buffer up to 100% */
419 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
421 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
427 n_bytes
= (size_t) n
* u
->frame_size
;
430 pa_log_debug("avail: %lu", (unsigned long) n_bytes
);
433 left_to_play
= check_left_to_play(u
, n_bytes
);
437 /* We won't fill up the playback buffer before at least
438 * half the sleep time is over because otherwise we might
439 * ask for more data from the clients then they expect. We
440 * need to guarantee that clients only have to keep around
441 * a single hw buffer length. */
444 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2) {
446 pa_log_debug("Not filling up, because too early.");
451 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
455 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
456 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
457 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
458 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
464 pa_log_debug("Not filling up, because not necessary.");
472 pa_log_debug("Not filling up, because already too many iterations.");
478 n_bytes
-= u
->hwbuf_unused
;
482 pa_log_debug("Filling up");
489 const snd_pcm_channel_area_t
*areas
;
490 snd_pcm_uframes_t offset
, frames
;
491 snd_pcm_sframes_t sframes
;
493 frames
= (snd_pcm_uframes_t
) (n_bytes
/ u
->frame_size
);
494 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
496 if (PA_UNLIKELY((err
= pa_alsa_safe_mmap_begin(u
->pcm_handle
, &areas
, &offset
, &frames
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
498 if (!after_avail
&& err
== -EAGAIN
)
501 if ((r
= try_recover(u
, "snd_pcm_mmap_begin", err
)) == 0)
507 /* Make sure that if these memblocks need to be copied they will fit into one slot */
508 if (frames
> pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
)
509 frames
= pa_mempool_block_size_max(u
->sink
->core
->mempool
)/u
->frame_size
;
511 if (!after_avail
&& frames
== 0)
514 pa_assert(frames
> 0);
517 /* Check these are multiples of 8 bit */
518 pa_assert((areas
[0].first
& 7) == 0);
519 pa_assert((areas
[0].step
& 7)== 0);
521 /* We assume a single interleaved memory buffer */
522 pa_assert((areas
[0].first
>> 3) == 0);
523 pa_assert((areas
[0].step
>> 3) == u
->frame_size
);
525 p
= (uint8_t*) areas
[0].addr
+ (offset
* u
->frame_size
);
527 chunk
.memblock
= pa_memblock_new_fixed(u
->core
->mempool
, p
, frames
* u
->frame_size
, TRUE
);
528 chunk
.length
= pa_memblock_get_length(chunk
.memblock
);
531 pa_sink_render_into_full(u
->sink
, &chunk
);
532 pa_memblock_unref_fixed(chunk
.memblock
);
534 if (PA_UNLIKELY((sframes
= snd_pcm_mmap_commit(u
->pcm_handle
, offset
, frames
)) < 0)) {
536 if ((r
= try_recover(u
, "snd_pcm_mmap_commit", (int) sframes
)) == 0)
544 u
->write_count
+= frames
* u
->frame_size
;
545 u
->since_start
+= frames
* u
->frame_size
;
548 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames
* u
->frame_size
), (unsigned long) n_bytes
);
551 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
554 n_bytes
-= (size_t) frames
* u
->frame_size
;
558 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
560 if (*sleep_usec
> process_usec
)
561 *sleep_usec
-= process_usec
;
565 return work_done
? 1 : 0;
568 static int unix_write(struct userdata
*u
, pa_usec_t
*sleep_usec
, pa_bool_t polled
) {
569 pa_bool_t work_done
= FALSE
;
570 pa_usec_t max_sleep_usec
= 0, process_usec
= 0;
575 pa_sink_assert_ref(u
->sink
);
578 hw_sleep_time(u
, &max_sleep_usec
, &process_usec
);
585 if (PA_UNLIKELY((n
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
587 if ((r
= try_recover(u
, "snd_pcm_avail", (int) n
)) == 0)
593 n_bytes
= (size_t) n
* u
->frame_size
;
594 left_to_play
= check_left_to_play(u
, n_bytes
);
598 /* We won't fill up the playback buffer before at least
599 * half the sleep time is over because otherwise we might
600 * ask for more data from the clients then they expect. We
601 * need to guarantee that clients only have to keep around
602 * a single hw buffer length. */
605 pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
) > process_usec
+max_sleep_usec
/2)
608 if (PA_UNLIKELY(n_bytes
<= u
->hwbuf_unused
)) {
612 char *dn
= pa_alsa_get_driver_name_by_pcm(u
->pcm_handle
);
613 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
614 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
615 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
625 pa_log_debug("Not filling up, because already too many iterations.");
631 n_bytes
-= u
->hwbuf_unused
;
635 snd_pcm_sframes_t frames
;
637 pa_bool_t after_avail
= TRUE
;
639 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
641 if (u
->memchunk
.length
<= 0)
642 pa_sink_render(u
->sink
, n_bytes
, &u
->memchunk
);
644 pa_assert(u
->memchunk
.length
> 0);
646 frames
= (snd_pcm_sframes_t
) (u
->memchunk
.length
/ u
->frame_size
);
648 if (frames
> (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
))
649 frames
= (snd_pcm_sframes_t
) (n_bytes
/u
->frame_size
);
651 p
= pa_memblock_acquire(u
->memchunk
.memblock
);
652 frames
= snd_pcm_writei(u
->pcm_handle
, (const uint8_t*) p
+ u
->memchunk
.index
, (snd_pcm_uframes_t
) frames
);
653 pa_memblock_release(u
->memchunk
.memblock
);
655 if (PA_UNLIKELY(frames
< 0)) {
657 if (!after_avail
&& (int) frames
== -EAGAIN
)
660 if ((r
= try_recover(u
, "snd_pcm_writei", (int) frames
)) == 0)
666 if (!after_avail
&& frames
== 0)
669 pa_assert(frames
> 0);
672 u
->memchunk
.index
+= (size_t) frames
* u
->frame_size
;
673 u
->memchunk
.length
-= (size_t) frames
* u
->frame_size
;
675 if (u
->memchunk
.length
<= 0) {
676 pa_memblock_unref(u
->memchunk
.memblock
);
677 pa_memchunk_reset(&u
->memchunk
);
682 u
->write_count
+= frames
* u
->frame_size
;
683 u
->since_start
+= frames
* u
->frame_size
;
685 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
687 if ((size_t) frames
* u
->frame_size
>= n_bytes
)
690 n_bytes
-= (size_t) frames
* u
->frame_size
;
694 *sleep_usec
= pa_bytes_to_usec(left_to_play
, &u
->sink
->sample_spec
);
696 if (*sleep_usec
> process_usec
)
697 *sleep_usec
-= process_usec
;
701 return work_done
? 1 : 0;
704 static void update_smoother(struct userdata
*u
) {
705 snd_pcm_sframes_t delay
= 0;
708 pa_usec_t now1
= 0, now2
;
709 snd_pcm_status_t
*status
;
711 snd_pcm_status_alloca(&status
);
714 pa_assert(u
->pcm_handle
);
716 /* Let's update the time smoother */
718 if (PA_UNLIKELY((err
= pa_alsa_safe_delay(u
->pcm_handle
, &delay
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
719 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err
));
723 if (PA_UNLIKELY((err
= snd_pcm_status(u
->pcm_handle
, status
)) < 0))
724 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err
));
726 snd_htimestamp_t htstamp
= { 0, 0 };
727 snd_pcm_status_get_htstamp(status
, &htstamp
);
728 now1
= pa_timespec_load(&htstamp
);
731 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
733 now1
= pa_rtclock_now();
735 /* check if the time since the last update is bigger than the interval */
736 if (u
->last_smoother_update
> 0)
737 if (u
->last_smoother_update
+ u
->smoother_interval
> now1
)
740 position
= (int64_t) u
->write_count
- ((int64_t) delay
* (int64_t) u
->frame_size
);
742 if (PA_UNLIKELY(position
< 0))
745 now2
= pa_bytes_to_usec((uint64_t) position
, &u
->sink
->sample_spec
);
747 pa_smoother_put(u
->smoother
, now1
, now2
);
749 u
->last_smoother_update
= now1
;
750 /* exponentially increase the update interval up to the MAX limit */
751 u
->smoother_interval
= PA_MIN (u
->smoother_interval
* 2, SMOOTHER_MAX_INTERVAL
);
754 static pa_usec_t
sink_get_latency(struct userdata
*u
) {
757 pa_usec_t now1
, now2
;
761 now1
= pa_rtclock_now();
762 now2
= pa_smoother_get(u
->smoother
, now1
);
764 delay
= (int64_t) pa_bytes_to_usec(u
->write_count
, &u
->sink
->sample_spec
) - (int64_t) now2
;
766 r
= delay
>= 0 ? (pa_usec_t
) delay
: 0;
768 if (u
->memchunk
.memblock
)
769 r
+= pa_bytes_to_usec(u
->memchunk
.length
, &u
->sink
->sample_spec
);
774 static int build_pollfd(struct userdata
*u
) {
776 pa_assert(u
->pcm_handle
);
778 if (u
->alsa_rtpoll_item
)
779 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
781 if (!(u
->alsa_rtpoll_item
= pa_alsa_build_pollfd(u
->pcm_handle
, u
->rtpoll
)))
787 /* Called from IO context */
788 static int suspend(struct userdata
*u
) {
790 pa_assert(u
->pcm_handle
);
792 pa_smoother_pause(u
->smoother
, pa_rtclock_now());
794 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
795 * take awfully long with our long buffer sizes today. */
796 snd_pcm_close(u
->pcm_handle
);
797 u
->pcm_handle
= NULL
;
799 if (u
->alsa_rtpoll_item
) {
800 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
801 u
->alsa_rtpoll_item
= NULL
;
804 pa_log_info("Device suspended...");
809 /* Called from IO context */
810 static int update_sw_params(struct userdata
*u
) {
811 snd_pcm_uframes_t avail_min
;
816 /* Use the full buffer if noone asked us for anything specific */
822 if ((latency
= pa_sink_get_requested_latency_within_thread(u
->sink
)) != (pa_usec_t
) -1) {
825 pa_log_debug("Latency set to %0.2fms", (double) latency
/ PA_USEC_PER_MSEC
);
827 b
= pa_usec_to_bytes(latency
, &u
->sink
->sample_spec
);
829 /* We need at least one sample in our buffer */
831 if (PA_UNLIKELY(b
< u
->frame_size
))
834 u
->hwbuf_unused
= PA_LIKELY(b
< u
->hwbuf_size
) ? (u
->hwbuf_size
- b
) : 0;
837 fix_min_sleep_wakeup(u
);
838 fix_tsched_watermark(u
);
841 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u
->hwbuf_unused
);
843 /* We need at last one frame in the used part of the buffer */
844 avail_min
= (snd_pcm_uframes_t
) u
->hwbuf_unused
/ u
->frame_size
+ 1;
847 pa_usec_t sleep_usec
, process_usec
;
849 hw_sleep_time(u
, &sleep_usec
, &process_usec
);
850 avail_min
+= pa_usec_to_bytes(sleep_usec
, &u
->sink
->sample_spec
) / u
->frame_size
;
853 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min
);
855 if ((err
= pa_alsa_set_sw_params(u
->pcm_handle
, avail_min
)) < 0) {
856 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err
));
860 pa_sink_set_max_request_within_thread(u
->sink
, u
->hwbuf_size
- u
->hwbuf_unused
);
865 /* Called from IO context */
866 static int unsuspend(struct userdata
*u
) {
871 snd_pcm_uframes_t period_size
;
874 pa_assert(!u
->pcm_handle
);
876 pa_log_info("Trying resume...");
878 if ((err
= snd_pcm_open(&u
->pcm_handle
, u
->device_name
, SND_PCM_STREAM_PLAYBACK
,
879 /*SND_PCM_NONBLOCK|*/
880 SND_PCM_NO_AUTO_RESAMPLE
|
881 SND_PCM_NO_AUTO_CHANNELS
|
882 SND_PCM_NO_AUTO_FORMAT
)) < 0) {
883 pa_log("Error opening PCM device %s: %s", u
->device_name
, pa_alsa_strerror(err
));
887 ss
= u
->sink
->sample_spec
;
888 nfrags
= u
->nfragments
;
889 period_size
= u
->fragment_size
/ u
->frame_size
;
893 if ((err
= pa_alsa_set_hw_params(u
->pcm_handle
, &ss
, &nfrags
, &period_size
, u
->hwbuf_size
/ u
->frame_size
, &b
, &d
, TRUE
)) < 0) {
894 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err
));
898 if (b
!= u
->use_mmap
|| d
!= u
->use_tsched
) {
899 pa_log_warn("Resume failed, couldn't get original access mode.");
903 if (!pa_sample_spec_equal(&ss
, &u
->sink
->sample_spec
)) {
904 pa_log_warn("Resume failed, couldn't restore original sample settings.");
908 if (nfrags
!= u
->nfragments
|| period_size
*u
->frame_size
!= u
->fragment_size
) {
909 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
910 (unsigned long) u
->nfragments
, (unsigned long) u
->fragment_size
,
911 (unsigned long) nfrags
, period_size
* u
->frame_size
);
915 if (update_sw_params(u
) < 0)
918 if (build_pollfd(u
) < 0)
922 pa_smoother_reset(u
->smoother
, pa_rtclock_now(), TRUE
);
923 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
924 u
->last_smoother_update
= 0;
929 pa_log_info("Resumed successfully...");
935 snd_pcm_close(u
->pcm_handle
);
936 u
->pcm_handle
= NULL
;
942 /* Called from IO context */
943 static int sink_process_msg(pa_msgobject
*o
, int code
, void *data
, int64_t offset
, pa_memchunk
*chunk
) {
944 struct userdata
*u
= PA_SINK(o
)->userdata
;
948 case PA_SINK_MESSAGE_GET_LATENCY
: {
952 r
= sink_get_latency(u
);
954 *((pa_usec_t
*) data
) = r
;
959 case PA_SINK_MESSAGE_SET_STATE
:
961 switch ((pa_sink_state_t
) PA_PTR_TO_UINT(data
)) {
963 case PA_SINK_SUSPENDED
:
964 pa_assert(PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
));
972 case PA_SINK_RUNNING
:
974 if (u
->sink
->thread_info
.state
== PA_SINK_INIT
) {
975 if (build_pollfd(u
) < 0)
979 if (u
->sink
->thread_info
.state
== PA_SINK_SUSPENDED
) {
980 if (unsuspend(u
) < 0)
986 case PA_SINK_UNLINKED
:
988 case PA_SINK_INVALID_STATE
:
995 return pa_sink_process_msg(o
, code
, data
, offset
, chunk
);
998 /* Called from main context */
999 static int sink_set_state_cb(pa_sink
*s
, pa_sink_state_t new_state
) {
1000 pa_sink_state_t old_state
;
1003 pa_sink_assert_ref(s
);
1004 pa_assert_se(u
= s
->userdata
);
1006 old_state
= pa_sink_get_state(u
->sink
);
1008 if (PA_SINK_IS_OPENED(old_state
) && new_state
== PA_SINK_SUSPENDED
)
1010 else if (old_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(new_state
))
1011 if (reserve_init(u
, u
->device_name
) < 0)
1017 static int mixer_callback(snd_mixer_elem_t
*elem
, unsigned int mask
) {
1018 struct userdata
*u
= snd_mixer_elem_get_callback_private(elem
);
1021 pa_assert(u
->mixer_handle
);
1023 if (mask
== SND_CTL_EVENT_MASK_REMOVE
)
1026 if (mask
& SND_CTL_EVENT_MASK_VALUE
) {
1027 pa_sink_get_volume(u
->sink
, TRUE
);
1028 pa_sink_get_mute(u
->sink
, TRUE
);
1034 static void sink_get_volume_cb(pa_sink
*s
) {
1035 struct userdata
*u
= s
->userdata
;
1037 char t
[PA_CVOLUME_SNPRINT_MAX
];
1040 pa_assert(u
->mixer_path
);
1041 pa_assert(u
->mixer_handle
);
1043 if (pa_alsa_path_get_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1046 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1047 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1049 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1051 if (pa_cvolume_equal(&u
->hardware_volume
, &r
))
1054 s
->real_volume
= u
->hardware_volume
= r
;
1056 /* Hmm, so the hardware volume changed, let's reset our software volume */
1057 if (u
->mixer_path
->has_dB
)
1058 pa_sink_set_soft_volume(s
, NULL
);
1061 static void sink_set_volume_cb(pa_sink
*s
) {
1062 struct userdata
*u
= s
->userdata
;
1064 char t
[PA_CVOLUME_SNPRINT_MAX
];
1067 pa_assert(u
->mixer_path
);
1068 pa_assert(u
->mixer_handle
);
1070 /* Shift up by the base volume */
1071 pa_sw_cvolume_divide_scalar(&r
, &s
->real_volume
, s
->base_volume
);
1073 if (pa_alsa_path_set_volume(u
->mixer_path
, u
->mixer_handle
, &s
->channel_map
, &r
) < 0)
1076 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1077 pa_sw_cvolume_multiply_scalar(&r
, &r
, s
->base_volume
);
1079 u
->hardware_volume
= r
;
1081 if (u
->mixer_path
->has_dB
) {
1082 pa_cvolume new_soft_volume
;
1083 pa_bool_t accurate_enough
;
1085 /* Match exactly what the user requested by software */
1086 pa_sw_cvolume_divide(&new_soft_volume
, &s
->real_volume
, &u
->hardware_volume
);
1088 /* If the adjustment to do in software is only minimal we
1089 * can skip it. That saves us CPU at the expense of a bit of
1092 (pa_cvolume_min(&new_soft_volume
) >= (PA_VOLUME_NORM
- VOLUME_ACCURACY
)) &&
1093 (pa_cvolume_max(&new_soft_volume
) <= (PA_VOLUME_NORM
+ VOLUME_ACCURACY
));
1095 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &s
->real_volume
));
1096 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &u
->hardware_volume
));
1097 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t
, sizeof(t
), &new_soft_volume
),
1098 pa_yes_no(accurate_enough
));
1100 if (!accurate_enough
)
1101 s
->soft_volume
= new_soft_volume
;
1104 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t
, sizeof(t
), &r
));
1106 /* We can't match exactly what the user requested, hence let's
1107 * at least tell the user about it */
1113 static void sink_get_mute_cb(pa_sink
*s
) {
1114 struct userdata
*u
= s
->userdata
;
1118 pa_assert(u
->mixer_path
);
1119 pa_assert(u
->mixer_handle
);
1121 if (pa_alsa_path_get_mute(u
->mixer_path
, u
->mixer_handle
, &b
) < 0)
1127 static void sink_set_mute_cb(pa_sink
*s
) {
1128 struct userdata
*u
= s
->userdata
;
1131 pa_assert(u
->mixer_path
);
1132 pa_assert(u
->mixer_handle
);
1134 pa_alsa_path_set_mute(u
->mixer_path
, u
->mixer_handle
, s
->muted
);
1137 static int sink_set_port_cb(pa_sink
*s
, pa_device_port
*p
) {
1138 struct userdata
*u
= s
->userdata
;
1139 pa_alsa_port_data
*data
;
1143 pa_assert(u
->mixer_handle
);
1145 data
= PA_DEVICE_PORT_DATA(p
);
1147 pa_assert_se(u
->mixer_path
= data
->path
);
1148 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1150 if (u
->mixer_path
->has_volume
&& u
->mixer_path
->has_dB
) {
1151 s
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1152 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
1154 if (u
->mixer_path
->max_dB
> 0.0)
1155 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s
->base_volume
));
1157 pa_log_info("No particular base volume set, fixing to 0 dB");
1159 s
->base_volume
= PA_VOLUME_NORM
;
1160 s
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
1164 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
1174 static void sink_update_requested_latency_cb(pa_sink
*s
) {
1175 struct userdata
*u
= s
->userdata
;
1182 before
= u
->hwbuf_unused
;
1183 update_sw_params(u
);
1185 /* Let's check whether we now use only a smaller part of the
1186 buffer then before. If so, we need to make sure that subsequent
1187 rewinds are relative to the new maximum fill level and not to the
1188 current fill level. Thus, let's do a full rewind once, to clear
1191 if (u
->hwbuf_unused
> before
) {
1192 pa_log_debug("Requesting rewind due to latency change.");
1193 pa_sink_request_rewind(s
, (size_t) -1);
1197 static int process_rewind(struct userdata
*u
) {
1198 snd_pcm_sframes_t unused
;
1199 size_t rewind_nbytes
, unused_nbytes
, limit_nbytes
;
1202 /* Figure out how much we shall rewind and reset the counter */
1203 rewind_nbytes
= u
->sink
->thread_info
.rewind_nbytes
;
1205 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes
);
1207 if (PA_UNLIKELY((unused
= pa_alsa_safe_avail(u
->pcm_handle
, u
->hwbuf_size
, &u
->sink
->sample_spec
)) < 0)) {
1208 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused
));
1212 unused_nbytes
= u
->tsched_watermark
+ (size_t) unused
* u
->frame_size
;
1214 if (u
->hwbuf_size
> unused_nbytes
)
1215 limit_nbytes
= u
->hwbuf_size
- unused_nbytes
;
1219 if (rewind_nbytes
> limit_nbytes
)
1220 rewind_nbytes
= limit_nbytes
;
1222 if (rewind_nbytes
> 0) {
1223 snd_pcm_sframes_t in_frames
, out_frames
;
1225 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes
);
1227 in_frames
= (snd_pcm_sframes_t
) (rewind_nbytes
/ u
->frame_size
);
1228 pa_log_debug("before: %lu", (unsigned long) in_frames
);
1229 if ((out_frames
= snd_pcm_rewind(u
->pcm_handle
, (snd_pcm_uframes_t
) in_frames
)) < 0) {
1230 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames
));
1231 if (try_recover(u
, "process_rewind", out_frames
) < 0)
1236 pa_log_debug("after: %lu", (unsigned long) out_frames
);
1238 rewind_nbytes
= (size_t) out_frames
* u
->frame_size
;
1240 if (rewind_nbytes
<= 0)
1241 pa_log_info("Tried rewind, but was apparently not possible.");
1243 u
->write_count
-= rewind_nbytes
;
1244 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes
);
1245 pa_sink_process_rewind(u
->sink
, rewind_nbytes
);
1247 u
->after_rewind
= TRUE
;
1251 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1253 pa_sink_process_rewind(u
->sink
, 0);
1257 static void thread_func(void *userdata
) {
/*
 * Real-time I/O thread for the ALSA playback sink. It renders audio from
 * the sink, writes it to the PCM device (mmap transfer or plain write),
 * programs the rtpoll wakeup timer when timer-based scheduling (tsched)
 * is active, and reacts to poll events coming back from ALSA.
 *
 * NOTE(review): this rendering of the file is lossy -- several original
 * source lines (local declarations such as work_done/ret/cusec/err/n,
 * the main for(;;) loop header, error gotos and closing braces) are not
 * visible here. Comments below describe only the statements shown.
 */
1258 struct userdata
*u
= userdata
;
1259 unsigned short revents
= 0;
1263 pa_log_debug("Thread starting up");
/* Elevate this thread to real-time scheduling if the core enables it. */
1265 if (u
->core
->realtime_scheduling
)
1266 pa_make_realtime(u
->core
->realtime_priority
);
/* Make this thread's message queue the one the thread-mq API returns. */
1268 pa_thread_mq_install(&u
->thread_mq
);
1274 pa_log_debug("Loop");
1277 /* Render some data and write it to the dsp */
1278 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1280 pa_usec_t sleep_usec
= 0;
/* Honor any rewind the sink requested before writing new data. */
1282 if (PA_UNLIKELY(u
->sink
->thread_info
.rewind_requested
))
1283 if (process_rewind(u
) < 0)
/* Write path: mmap transfer when enabled, otherwise plain write.
 * Both return how much work was done, report how long we may sleep
 * via sleep_usec, and are told whether POLLOUT was signalled. */
1287 work_done
= mmap_write(u
, &sleep_usec
, revents
& POLLOUT
);
1289 work_done
= unix_write(u
, &sleep_usec
, revents
& POLLOUT
);
1294 /* pa_log_debug("work_done = %i", work_done); */
/* First data written: kick off the PCM and resume the clock smoother
 * so the sound-card/system time translation starts tracking again. */
1299 pa_log_info("Starting playback.");
1300 snd_pcm_start(u
->pcm_handle
);
1302 pa_smoother_resume(u
->smoother
, pa_rtclock_now(), TRUE
);
1308 if (u
->use_tsched
) {
1311 if (u
->since_start
<= u
->hwbuf_size
) {
1313 /* USB devices on ALSA seem to hit a buffer
1314 * underrun during the first iterations much
1315 * quicker then we calculate here, probably due to
1316 * the transport latency. To accommodate for that
1317 * we artificially decrease the sleep time until
1318 * we have filled the buffer at least once
1321 if (pa_log_ratelimit())
1322 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1326 /* OK, the playback buffer is now full, let's
1327 * calculate when to wake up next */
1328 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1330 /* Convert from the sound card time domain to the
1331 * system time domain */
1332 cusec
= pa_smoother_translate(u
->smoother
, pa_rtclock_now(), sleep_usec
);
1334 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1336 /* We don't trust the conversion, so we wake up whatever comes first */
1337 pa_rtpoll_set_timer_relative(u
->rtpoll
, PA_MIN(sleep_usec
, cusec
));
1341 u
->after_rewind
= FALSE
;
1343 } else if (u
->use_tsched
)
1345 /* OK, we're in an invalid state, let's disable our timers */
1346 pa_rtpoll_set_timer_disabled(u
->rtpoll
);
1348 /* Hmm, nothing to do. Let's sleep */
/* Block in the rtpoll event loop until the timer fires, a message
 * arrives, or the PCM fd becomes ready; negative return is an error. */
1349 if ((ret
= pa_rtpoll_run(u
->rtpoll
, TRUE
)) < 0)
1355 /* Tell ALSA about this and process its response */
1356 if (PA_SINK_IS_OPENED(u
->sink
->thread_info
.state
)) {
1357 struct pollfd
*pollfd
;
1361 pollfd
= pa_rtpoll_item_get_pollfd(u
->alsa_rtpoll_item
, &n
);
/* Translate raw poll() revents into PCM-level events. */
1363 if ((err
= snd_pcm_poll_descriptors_revents(u
->pcm_handle
, pollfd
, n
, &revents
)) < 0) {
1364 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err
));
/* Anything besides POLLOUT (e.g. POLLERR) signals trouble -- try to
 * recover the PCM from the error/suspend state. */
1368 if (revents
& ~POLLOUT
) {
1369 if (pa_alsa_recover_from_poll(u
->pcm_handle
, revents
) < 0)
1374 } else if (revents
&& u
->use_tsched
&& pa_log_ratelimit())
1375 pa_log_debug("Wakeup from ALSA!");
1382 /* If this was no regular exit from the loop we have to continue
1383 * processing messages until we received PA_MESSAGE_SHUTDOWN */
/* Failure path: ask the main thread to unload this module, then keep
 * servicing the inbound queue until the shutdown message arrives. */
1384 pa_asyncmsgq_post(u
->thread_mq
.outq
, PA_MSGOBJECT(u
->core
), PA_CORE_MESSAGE_UNLOAD_MODULE
, u
->module
, 0, NULL
, NULL
);
1385 pa_asyncmsgq_wait_for(u
->thread_mq
.inq
, PA_MESSAGE_SHUTDOWN
);
1388 pa_log_debug("Thread shutting down");
1391 static void set_sink_name(pa_sink_new_data
*data
, pa_modargs
*ma
, const char *device_id
, const char *device_name
, pa_alsa_mapping
*mapping
) {
/*
 * Choose the new sink's name. An explicit "sink_name" (or legacy "name")
 * module argument wins and makes name-registry collisions fatal
 * (namereg_fail = TRUE); otherwise a name is synthesized as
 * "alsa_output.<dev>[.<mapping>]" from the device id (preferred) or the
 * device name, and collisions are tolerated (namereg_fail = FALSE).
 *
 * NOTE(review): lossy extraction -- the declarations of the locals n/t,
 * the early returns between branches, and the pa_xfree(t) cleanup are
 * not visible here.
 */
1397 pa_assert(device_name
);
1399 if ((n
= pa_modargs_get_value(ma
, "sink_name", NULL
))) {
1400 pa_sink_new_data_set_name(data
, n
);
/* User picked the name explicitly: fail module load if it is taken. */
1401 data
->namereg_fail
= TRUE
;
/* Legacy "name" argument behaves the same as "sink_name". */
1405 if ((n
= pa_modargs_get_value(ma
, "name", NULL
)))
1406 data
->namereg_fail
= TRUE
;
/* No explicit name: derive one and allow the registry to mangle it. */
1408 n
= device_id
? device_id
: device_name
;
1409 data
->namereg_fail
= FALSE
;
/* Include the mapping name when a mapping was selected. */
1413 t
= pa_sprintf_malloc("alsa_output.%s.%s", n
, mapping
->name
);
1415 t
= pa_sprintf_malloc("alsa_output.%s", n
);
1417 pa_sink_new_data_set_name(data
, t
);
1421 static void find_mixer(struct userdata
*u
, pa_alsa_mapping
*mapping
, const char *element
, pa_bool_t ignore_dB
) {
/*
 * Locate and probe a mixer for the PCM device. With an explicit
 * "control" element a single synthesized output path is probed
 * (u->mixer_path); otherwise a whole path set derived from the mapping
 * is probed (u->mixer_path_set). On any failure the tail of this
 * function frees whatever was allocated and closes the mixer handle.
 *
 * NOTE(review): lossy extraction -- the early returns, goto fail
 * statements and the "fail:" label itself are not visible here; the
 * cleanup statements at the bottom are that failure path.
 */
1423 if (!mapping
&& !element
)
/* Open a mixer device matching the PCM; also records the control
 * device name in u->control_device. */
1426 if (!(u
->mixer_handle
= pa_alsa_open_mixer_for_pcm(u
->pcm_handle
, &u
->control_device
))) {
1427 pa_log_info("Failed to find a working mixer device.");
/* Explicit mixer element requested: build a single output path for it. */
1433 if (!(u
->mixer_path
= pa_alsa_path_synthesize(element
, PA_ALSA_DIRECTION_OUTPUT
)))
1436 if (pa_alsa_path_probe(u
->mixer_path
, u
->mixer_handle
, ignore_dB
) < 0)
1439 pa_log_debug("Probed mixer path %s:", u
->mixer_path
->name
);
1440 pa_alsa_path_dump(u
->mixer_path
);
/* No explicit element: probe the full path set for this mapping. */
1443 if (!(u
->mixer_path_set
= pa_alsa_path_set_new(mapping
, PA_ALSA_DIRECTION_OUTPUT
)))
1446 pa_alsa_path_set_probe(u
->mixer_path_set
, u
->mixer_handle
, ignore_dB
);
1448 pa_log_debug("Probed mixer paths:");
1449 pa_alsa_path_set_dump(u
->mixer_path_set
);
/* Failure cleanup: release whichever of path set / single path was
 * allocated, then close the mixer handle. */
1456 if (u
->mixer_path_set
) {
1457 pa_alsa_path_set_free(u
->mixer_path_set
);
1458 u
->mixer_path_set
= NULL
;
1459 } else if (u
->mixer_path
) {
1460 pa_alsa_path_free(u
->mixer_path
);
1461 u
->mixer_path
= NULL
;
1464 if (u
->mixer_handle
) {
1465 snd_mixer_close(u
->mixer_handle
);
1466 u
->mixer_handle
= NULL
;
1470 static int setup_mixer(struct userdata
*u
, pa_bool_t ignore_dB
) {
/*
 * Activate the chosen mixer path and wire hardware volume/mute control
 * into the sink: select the path for the active port (or the single
 * probed path), install get/set volume and mute callbacks when the path
 * supports them, and start monitoring the mixer's file descriptors so
 * external volume changes are picked up.
 *
 * Returns 0 on success, negative on failure (visible failure: fd-list
 * setup). NOTE(review): lossy extraction -- early returns, some closing
 * braces and else-branches are not visible here.
 */
1473 if (!u
->mixer_handle
)
1476 if (u
->sink
->active_port
) {
1477 pa_alsa_port_data
*data
;
1479 /* We have a list of supported paths, so let's activate the
1480 * one that has been chosen as active */
1482 data
= PA_DEVICE_PORT_DATA(u
->sink
->active_port
);
1483 u
->mixer_path
= data
->path
;
1485 pa_alsa_path_select(data
->path
, u
->mixer_handle
);
1488 pa_alsa_setting_select(data
->setting
, u
->mixer_handle
);
/* No active port: fall back to the first path in the probed set. */
1492 if (!u
->mixer_path
&& u
->mixer_path_set
)
1493 u
->mixer_path
= u
->mixer_path_set
->paths
;
1495 if (u
->mixer_path
) {
1496 /* Hmm, we have only a single path, then let's activate it */
1498 pa_alsa_path_select(u
->mixer_path
, u
->mixer_handle
);
1500 if (u
->mixer_path
->settings
)
1501 pa_alsa_setting_select(u
->mixer_path
->settings
, u
->mixer_handle
);
1506 if (!u
->mixer_path
->has_volume
)
1507 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
/* dB-capable path: derive base volume and use a PA_VOLUME_NORM-step
 * scale; otherwise expose the raw integer volume range. */
1510 if (u
->mixer_path
->has_dB
) {
1511 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u
->mixer_path
->min_dB
, u
->mixer_path
->max_dB
);
/* Base volume compensates for amplification: -max_dB in software. */
1513 u
->sink
->base_volume
= pa_sw_volume_from_dB(-u
->mixer_path
->max_dB
);
1514 u
->sink
->n_volume_steps
= PA_VOLUME_NORM
+1;
1516 if (u
->mixer_path
->max_dB
> 0.0)
1517 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u
->sink
->base_volume
));
1519 pa_log_info("No particular base volume set, fixing to 0 dB");
1522 pa_log_info("Hardware volume ranges from %li to %li.", u
->mixer_path
->min_volume
, u
->mixer_path
->max_volume
);
1523 u
->sink
->base_volume
= PA_VOLUME_NORM
;
1524 u
->sink
->n_volume_steps
= u
->mixer_path
->max_volume
- u
->mixer_path
->min_volume
+ 1;
/* Hook up hardware volume callbacks and advertise the capability. */
1527 u
->sink
->get_volume
= sink_get_volume_cb
;
1528 u
->sink
->set_volume
= sink_set_volume_cb
;
1530 u
->sink
->flags
|= PA_SINK_HW_VOLUME_CTRL
| (u
->mixer_path
->has_dB
? PA_SINK_DECIBEL_VOLUME
: 0);
1531 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u
->mixer_path
->has_dB
? "supported" : "not supported");
1534 if (!u
->mixer_path
->has_mute
) {
1535 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1537 u
->sink
->get_mute
= sink_get_mute_cb
;
1538 u
->sink
->set_mute
= sink_set_mute_cb
;
1539 u
->sink
->flags
|= PA_SINK_HW_MUTE_CTRL
;
1540 pa_log_info("Using hardware mute control.");
/* Watch the mixer's fds on the main loop so external changes
 * (e.g. alsamixer) are reflected in the sink's volume/mute. */
1543 u
->mixer_fdl
= pa_alsa_fdlist_new();
1545 if (pa_alsa_fdlist_set_mixer(u
->mixer_fdl
, u
->mixer_handle
, u
->core
->mainloop
) < 0) {
1546 pa_log("Failed to initialize file descriptor monitoring");
/* Register change callbacks on whichever path representation we use. */
1550 if (u
->mixer_path_set
)
1551 pa_alsa_path_set_set_callback(u
->mixer_path_set
, u
->mixer_handle
, mixer_callback
, u
);
1553 pa_alsa_path_set_callback(u
->mixer_path
, u
->mixer_handle
, mixer_callback
, u
);
1558 pa_sink
*pa_alsa_sink_new(pa_module
*m
, pa_modargs
*ma
, const char*driver
, pa_card
*card
, pa_alsa_mapping
*mapping
) {
/*
 * Create and publish an ALSA playback sink. Steps: parse module
 * arguments (sample spec, buffer metrics, mmap/tsched/ignore_dB flags),
 * reserve the device, open the PCM (by mapping, by device id with
 * auto-profile, or by device string), degrade gracefully when mmap or
 * timer-based scheduling is unavailable, probe the mixer, construct the
 * pa_sink with its properties and ports, configure latency/watermarks,
 * start the real-time I/O thread, apply initial volume/mute, and
 * finally pa_sink_put() the sink. Returns the new sink or (per the
 * usual convention of this file) NULL on failure.
 *
 * NOTE(review): lossy extraction -- several locals (map, frame_size,
 * err, ...), the goto fail statements, the fail:/finish: labels and
 * various closing braces are not visible here; comments describe only
 * the visible statements.
 */
1560 struct userdata
*u
= NULL
;
1561 const char *dev_id
= NULL
;
1562 pa_sample_spec ss
, requested_ss
;
1564 uint32_t nfrags
, hwbuf_size
, frag_size
, tsched_size
, tsched_watermark
;
1565 snd_pcm_uframes_t period_frames
, tsched_frames
;
1567 pa_bool_t use_mmap
= TRUE
, b
, use_tsched
= TRUE
, d
, ignore_dB
= FALSE
;
1568 pa_sink_new_data data
;
1569 pa_alsa_profile_set
*profile_set
= NULL
;
/* Start from the daemon defaults, then let module arguments override. */
1574 ss
= m
->core
->default_sample_spec
;
1575 map
= m
->core
->default_channel_map
;
1576 if (pa_modargs_get_sample_spec_and_channel_map(ma
, &ss
, &map
, PA_CHANNEL_MAP_ALSA
) < 0) {
1577 pa_log("Failed to parse sample specification and channel map");
1582 frame_size
= pa_frame_size(&ss
);
/* Default buffer metrics for both IRQ-based and timer-based modes. */
1584 nfrags
= m
->core
->default_n_fragments
;
1585 frag_size
= (uint32_t) pa_usec_to_bytes(m
->core
->default_fragment_size_msec
*PA_USEC_PER_MSEC
, &ss
);
1587 frag_size
= (uint32_t) frame_size
;
1588 tsched_size
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC
, &ss
);
1589 tsched_watermark
= (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC
, &ss
);
1591 if (pa_modargs_get_value_u32(ma
, "fragments", &nfrags
) < 0 ||
1592 pa_modargs_get_value_u32(ma
, "fragment_size", &frag_size
) < 0 ||
1593 pa_modargs_get_value_u32(ma
, "tsched_buffer_size", &tsched_size
) < 0 ||
1594 pa_modargs_get_value_u32(ma
, "tsched_buffer_watermark", &tsched_watermark
) < 0) {
1595 pa_log("Failed to parse buffer metrics");
1599 hwbuf_size
= frag_size
* nfrags
;
1600 period_frames
= frag_size
/frame_size
;
1601 tsched_frames
= tsched_size
/frame_size
;
1603 if (pa_modargs_get_value_boolean(ma
, "mmap", &use_mmap
) < 0) {
1604 pa_log("Failed to parse mmap argument.");
1608 if (pa_modargs_get_value_boolean(ma
, "tsched", &use_tsched
) < 0) {
1609 pa_log("Failed to parse tsched argument.");
1613 if (pa_modargs_get_value_boolean(ma
, "ignore_dB", &ignore_dB
) < 0) {
1614 pa_log("Failed to parse ignore_dB argument.");
/* tsched needs high-resolution timers; fall back to IRQ mode without. */
1618 if (use_tsched
&& !pa_rtclock_hrtimer()) {
1619 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1623 u
= pa_xnew0(struct userdata
, 1);
1626 u
->use_mmap
= use_mmap
;
1627 u
->use_tsched
= use_tsched
;
1629 u
->rtpoll
= pa_rtpoll_new();
1630 pa_thread_mq_init(&u
->thread_mq
, m
->core
->mainloop
, u
->rtpoll
);
/* Smoother maps between sound-card and system time domains. */
1632 u
->smoother
= pa_smoother_new(
1633 DEFAULT_TSCHED_BUFFER_USEC
*2,
1634 DEFAULT_TSCHED_BUFFER_USEC
*2,
1640 u
->smoother_interval
= SMOOTHER_MIN_INTERVAL
;
/* Take the device reservation before touching the hardware. */
1642 dev_id
= pa_modargs_get_value(
1644 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
));
1646 if (reserve_init(u
, dev_id
) < 0)
1649 if (reserve_monitor_init(u
, dev_id
) < 0)
/* Open path 1: an explicit mapping requires device_id=. */
1657 if (!(dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1658 pa_log("device_id= not set");
1662 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_mapping(
1666 SND_PCM_STREAM_PLAYBACK
,
1667 &nfrags
, &period_frames
, tsched_frames
,
/* Open path 2: device_id= without a mapping -- probe a fresh profile
 * set and auto-select a mapping (returned via &mapping). */
1672 } else if ((dev_id
= pa_modargs_get_value(ma
, "device_id", NULL
))) {
1674 if (!(profile_set
= pa_alsa_profile_set_new(NULL
, &map
)))
1677 if (!(u
->pcm_handle
= pa_alsa_open_by_device_id_auto(
1681 SND_PCM_STREAM_PLAYBACK
,
1682 &nfrags
, &period_frames
, tsched_frames
,
1683 &b
, &d
, profile_set
, &mapping
)))
/* Open path 3: plain device= string (default "default"). */
1689 if (!(u
->pcm_handle
= pa_alsa_open_by_device_string(
1690 pa_modargs_get_value(ma
, "device", DEFAULT_DEVICE
),
1693 SND_PCM_STREAM_PLAYBACK
,
1694 &nfrags
, &period_frames
, tsched_frames
,
1699 pa_assert(u
->device_name
);
1700 pa_log_info("Successfully opened device %s.", u
->device_name
);
1702 if (pa_alsa_pcm_is_modem(u
->pcm_handle
)) {
1703 pa_log_notice("Device %s is modem, refusing further initialization.", u
->device_name
);
1708 pa_log_info("Selected mapping '%s' (%s).", mapping
->description
, mapping
->name
);
/* Degrade capabilities according to what the open call reported:
 * b = mmap worked, d = tsched-compatible configuration worked. */
1710 if (use_mmap
&& !b
) {
1711 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1712 u
->use_mmap
= use_mmap
= FALSE
;
1715 if (use_tsched
&& (!b
|| !d
)) {
1716 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1717 u
->use_tsched
= use_tsched
= FALSE
;
1720 if (use_tsched
&& !pa_alsa_pcm_is_hw(u
->pcm_handle
)) {
1721 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1722 u
->use_tsched
= use_tsched
= FALSE
;
1726 pa_log_info("Successfully enabled mmap() mode.");
1729 pa_log_info("Successfully enabled timer-based scheduling mode.");
1731 /* ALSA might tweak the sample spec, so recalculate the frame size */
1732 frame_size
= pa_frame_size(&ss
);
1734 find_mixer(u
, mapping
, pa_modargs_get_value(ma
, "control", NULL
), ignore_dB
);
/* Build the pa_sink_new_data: name, spec, map and device properties. */
1736 pa_sink_new_data_init(&data
);
1737 data
.driver
= driver
;
1740 set_sink_name(&data
, ma
, dev_id
, u
->device_name
, mapping
);
1741 pa_sink_new_data_set_sample_spec(&data
, &ss
);
1742 pa_sink_new_data_set_channel_map(&data
, &map
);
1744 pa_alsa_init_proplist_pcm(m
->core
, data
.proplist
, u
->pcm_handle
);
1745 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_STRING
, u
->device_name
);
1746 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
* nfrags
));
1747 pa_proplist_setf(data
.proplist
, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE
, "%lu", (unsigned long) (period_frames
* frame_size
));
1748 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_ACCESS_MODE
, u
->use_tsched
? "mmap+timer" : (u
->use_mmap
? "mmap" : "serial"));
1751 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_NAME
, mapping
->name
);
1752 pa_proplist_sets(data
.proplist
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
, mapping
->description
);
1755 pa_alsa_init_description(data
.proplist
);
1757 if (u
->control_device
)
1758 pa_alsa_init_proplist_ctl(data
.proplist
, u
->control_device
);
1760 if (pa_modargs_get_proplist(ma
, "sink_properties", data
.proplist
, PA_UPDATE_REPLACE
) < 0) {
1761 pa_log("Invalid properties");
1762 pa_sink_new_data_done(&data
);
/* Expose mixer paths as selectable ports on the new sink. */
1766 if (u
->mixer_path_set
)
1767 pa_alsa_add_ports(&data
.ports
, u
->mixer_path_set
);
1769 u
->sink
= pa_sink_new(m
->core
, &data
, PA_SINK_HARDWARE
|PA_SINK_LATENCY
|(u
->use_tsched
? PA_SINK_DYNAMIC_LATENCY
: 0));
1770 pa_sink_new_data_done(&data
);
1773 pa_log("Failed to create sink object");
/* Install the sink's virtual-method callbacks. */
1777 u
->sink
->parent
.process_msg
= sink_process_msg
;
1778 u
->sink
->update_requested_latency
= sink_update_requested_latency_cb
;
1779 u
->sink
->set_state
= sink_set_state_cb
;
1780 u
->sink
->set_port
= sink_set_port_cb
;
1781 u
->sink
->userdata
= u
;
1783 pa_sink_set_asyncmsgq(u
->sink
, u
->thread_mq
.inq
);
1784 pa_sink_set_rtpoll(u
->sink
, u
->rtpoll
);
/* Record the final (possibly ALSA-adjusted) buffer geometry. */
1786 u
->frame_size
= frame_size
;
1787 u
->fragment_size
= frag_size
= (uint32_t) (period_frames
* frame_size
);
1788 u
->nfragments
= nfrags
;
1789 u
->hwbuf_size
= u
->fragment_size
* nfrags
;
/* Rescale the watermark from the requested spec to the actual one. */
1790 u
->tsched_watermark
= pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark
, &requested_ss
), &u
->sink
->sample_spec
);
1791 pa_cvolume_mute(&u
->hardware_volume
, u
->sink
->sample_spec
.channels
);
1793 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1794 nfrags
, (long unsigned) u
->fragment_size
,
1795 (double) pa_bytes_to_usec(u
->hwbuf_size
, &ss
) / PA_USEC_PER_MSEC
);
1797 pa_sink_set_max_request(u
->sink
, u
->hwbuf_size
);
1798 pa_sink_set_max_rewind(u
->sink
, u
->hwbuf_size
);
/* Latency setup: dynamic range with tsched, fixed otherwise. */
1800 if (u
->use_tsched
) {
1801 u
->watermark_step
= pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC
, &u
->sink
->sample_spec
);
1803 fix_min_sleep_wakeup(u
);
1804 fix_tsched_watermark(u
);
1806 pa_sink_set_latency_range(u
->sink
,
1808 pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1810 pa_log_info("Time scheduling watermark is %0.2fms",
1811 (double) pa_bytes_to_usec(u
->tsched_watermark
, &ss
) / PA_USEC_PER_MSEC
);
1813 pa_sink_set_fixed_latency(u
->sink
, pa_bytes_to_usec(u
->hwbuf_size
, &ss
));
1817 if (update_sw_params(u
) < 0)
1820 if (setup_mixer(u
, ignore_dB
) < 0)
1823 pa_alsa_dump(PA_LOG_DEBUG
, u
->pcm_handle
);
1825 if (!(u
->thread
= pa_thread_new(thread_func
, u
))) {
1826 pa_log("Failed to create thread.");
1830 /* Get initial mixer settings */
1831 if (data
.volume_is_set
) {
1832 if (u
->sink
->set_volume
)
1833 u
->sink
->set_volume(u
->sink
);
1835 if (u
->sink
->get_volume
)
1836 u
->sink
->get_volume(u
->sink
);
1839 if (data
.muted_is_set
) {
1840 if (u
->sink
->set_mute
)
1841 u
->sink
->set_mute(u
->sink
);
1843 if (u
->sink
->get_mute
)
1844 u
->sink
->get_mute(u
->sink
);
1847 pa_sink_put(u
->sink
);
/* Success path frees the temporary profile set ... */
1850 pa_alsa_profile_set_free(profile_set
);
/* ... and so does the (not fully visible) failure path. */
1860 pa_alsa_profile_set_free(profile_set
);
1865 static void userdata_free(struct userdata
*u
) {
/*
 * Tear down everything pa_alsa_sink_new() built, in reverse order:
 * unlink the sink, stop the I/O thread (synchronous SHUTDOWN message),
 * drop queued memory/poll items, stop and close the PCM, release mixer
 * resources, and free remaining allocations.
 *
 * NOTE(review): lossy extraction -- the pa_assert(u), the "if (u->sink)"
 * style guards around several of these calls, and the final pa_xfree(u)
 * are not visible here.
 */
1869 pa_sink_unlink(u
->sink
);
/* Synchronously ask the I/O thread to exit, then reap it. */
1872 pa_asyncmsgq_send(u
->thread_mq
.inq
, NULL
, PA_MESSAGE_SHUTDOWN
, NULL
, 0, NULL
);
1873 pa_thread_free(u
->thread
);
1876 pa_thread_mq_done(&u
->thread_mq
);
1879 pa_sink_unref(u
->sink
);
1881 if (u
->memchunk
.memblock
)
1882 pa_memblock_unref(u
->memchunk
.memblock
);
1884 if (u
->alsa_rtpoll_item
)
1885 pa_rtpoll_item_free(u
->alsa_rtpoll_item
);
1888 pa_rtpoll_free(u
->rtpoll
);
/* Discard pending samples before closing the PCM. */
1890 if (u
->pcm_handle
) {
1891 snd_pcm_drop(u
->pcm_handle
);
1892 snd_pcm_close(u
->pcm_handle
);
1896 pa_alsa_fdlist_free(u
->mixer_fdl
);
/* Only one of path set / single path is ever allocated; free it. */
1898 if (u
->mixer_path_set
)
1899 pa_alsa_path_set_free(u
->mixer_path_set
);
1900 else if (u
->mixer_path
)
1901 pa_alsa_path_free(u
->mixer_path
);
1903 if (u
->mixer_handle
)
1904 snd_mixer_close(u
->mixer_handle
);
1907 pa_smoother_free(u
->smoother
);
1912 pa_xfree(u
->device_name
);
1913 pa_xfree(u
->control_device
);
1917 void pa_alsa_sink_free(pa_sink
*s
) {
1920 pa_sink_assert_ref(s
);
1921 pa_assert_se(u
= s
->userdata
);