src/modules/alsa/alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to recheck whether things are good again */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC) /* 1ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */
74
75 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
76 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
77
78 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
79 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
80
81 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't extend granularity in software unless the difference exceeds this level */
82
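/*
 * Overview of the timer-based scheduling ("tsched") mode implemented
 * below: instead of waking up on every sound-card period interrupt we
 * configure a large hardware buffer (DEFAULT_TSCHED_BUFFER_USEC) and
 * program a high-resolution timer to wake us up shortly before it runs
 * empty (DEFAULT_TSCHED_WATERMARK_USEC). On underruns the watermark is
 * raised by TSCHED_WATERMARK_INC_STEP_USEC; after
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC without trouble it is lowered
 * again by TSCHED_WATERMARK_DEC_STEP_USEC. With the defaults that
 * means roughly: sleep for ~2s minus the 20ms watermark, then refill.
 */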
83 struct userdata {
84 pa_core *core;
85 pa_module *module;
86 pa_sink *sink;
87
88 pa_thread *thread;
89 pa_thread_mq thread_mq;
90 pa_rtpoll *rtpoll;
91
92 snd_pcm_t *pcm_handle;
93
94 pa_alsa_fdlist *mixer_fdl;
95 snd_mixer_t *mixer_handle;
96 pa_alsa_path_set *mixer_path_set;
97 pa_alsa_path *mixer_path;
98
99 pa_cvolume hardware_volume;
100
101 size_t
102 frame_size,
103 fragment_size,
104 hwbuf_size,
105 tsched_watermark,
106 hwbuf_unused,
107 min_sleep,
108 min_wakeup,
109 watermark_inc_step,
110 watermark_dec_step,
111 watermark_inc_threshold,
112 watermark_dec_threshold;
113
114 pa_usec_t watermark_dec_not_before;
115
116 unsigned nfragments;
117 pa_memchunk memchunk;
118
119 char *device_name; /* name of the PCM device */
120 char *control_device; /* name of the control device */
121
122 pa_bool_t use_mmap:1, use_tsched:1;
123
124 pa_bool_t first, after_rewind;
125
126 pa_rtpoll_item *alsa_rtpoll_item;
127
128 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
129
130 pa_smoother *smoother;
131 uint64_t write_count;
132 uint64_t since_start;
133 pa_usec_t smoother_interval;
134 pa_usec_t last_smoother_update;
135
136 pa_reserve_wrapper *reserve;
137 pa_hook_slot *reserve_slot;
138 pa_reserve_monitor_wrapper *monitor;
139 pa_hook_slot *monitor_slot;
140 };
141
142 static void userdata_free(struct userdata *u);
143
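/* Device reservation: reserve_cb() fires when another process (e.g.
 * another sound server) forcibly takes the device through the device
 * reservation logic wrapped by modules/reserve-wrap; we then suspend
 * the sink. monitor_cb() below watches the reservation state of
 * devices we did not lock ourselves and suspends/resumes the sink
 * accordingly. */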
144 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
145 pa_assert(r);
146 pa_assert(u);
147
148 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
149 return PA_HOOK_CANCEL;
150
151 return PA_HOOK_OK;
152 }
153
154 static void reserve_done(struct userdata *u) {
155 pa_assert(u);
156
157 if (u->reserve_slot) {
158 pa_hook_slot_free(u->reserve_slot);
159 u->reserve_slot = NULL;
160 }
161
162 if (u->reserve) {
163 pa_reserve_wrapper_unref(u->reserve);
164 u->reserve = NULL;
165 }
166 }
167
168 static void reserve_update(struct userdata *u) {
169 const char *description;
170 pa_assert(u);
171
172 if (!u->sink || !u->reserve)
173 return;
174
175 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
176 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
177 }
178
179 static int reserve_init(struct userdata *u, const char *dname) {
180 char *rname;
181
182 pa_assert(u);
183 pa_assert(dname);
184
185 if (u->reserve)
186 return 0;
187
188 if (pa_in_system_mode())
189 return 0;
190
191 if (!(rname = pa_alsa_get_reserve_name(dname)))
192 return 0;
193
194 /* We are resuming, try to lock the device */
195 u->reserve = pa_reserve_wrapper_get(u->core, rname);
196 pa_xfree(rname);
197
198 if (!(u->reserve))
199 return -1;
200
201 reserve_update(u);
202
203 pa_assert(!u->reserve_slot);
204 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
205
206 return 0;
207 }
208
209 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
210 pa_bool_t b;
211
212 pa_assert(w);
213 pa_assert(u);
214
215 b = PA_PTR_TO_UINT(busy) && !u->reserve;
216
217 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
218 return PA_HOOK_OK;
219 }
220
221 static void monitor_done(struct userdata *u) {
222 pa_assert(u);
223
224 if (u->monitor_slot) {
225 pa_hook_slot_free(u->monitor_slot);
226 u->monitor_slot = NULL;
227 }
228
229 if (u->monitor) {
230 pa_reserve_monitor_wrapper_unref(u->monitor);
231 u->monitor = NULL;
232 }
233 }
234
235 static int reserve_monitor_init(struct userdata *u, const char *dname) {
236 char *rname;
237
238 pa_assert(u);
239 pa_assert(dname);
240
241 if (pa_in_system_mode())
242 return 0;
243
244 if (!(rname = pa_alsa_get_reserve_name(dname)))
245 return 0;
246
247 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
248 pa_xfree(rname);
249
250 if (!(u->monitor))
251 return -1;
252
253 pa_assert(!u->monitor_slot);
254 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
255
256 return 0;
257 }
258
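/* Clamp min_sleep/min_wakeup to sane values. As a worked example,
 * assuming S16LE stereo at 44.1 kHz (frame_size = 4 bytes),
 * TSCHED_MIN_SLEEP_USEC = 10ms translates to 441 frames = 1764 bytes,
 * which is then clamped between one frame and half of the usable
 * buffer. */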
259 static void fix_min_sleep_wakeup(struct userdata *u) {
260 size_t max_use, max_use_2;
261
262 pa_assert(u);
263 pa_assert(u->use_tsched);
264
265 max_use = u->hwbuf_size - u->hwbuf_unused;
266 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
267
268 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
269 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
270
271 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
272 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
273 }
274
275 static void fix_tsched_watermark(struct userdata *u) {
276 size_t max_use;
277 pa_assert(u);
278 pa_assert(u->use_tsched);
279
280 max_use = u->hwbuf_size - u->hwbuf_unused;
281
282 if (u->tsched_watermark > max_use - u->min_sleep)
283 u->tsched_watermark = max_use - u->min_sleep;
284
285 if (u->tsched_watermark < u->min_wakeup)
286 u->tsched_watermark = u->min_wakeup;
287 }
288
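/* Called on underrun: escalate in two stages. First try to grow the
 * wakeup watermark (at most doubling it and by no more than
 * watermark_inc_step, capped by fix_tsched_watermark()); only if the
 * watermark cannot grow any further do we raise the sink's minimal
 * latency instead. */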
289 static void increase_watermark(struct userdata *u) {
290 size_t old_watermark;
291 pa_usec_t old_min_latency, new_min_latency;
292
293 pa_assert(u);
294 pa_assert(u->use_tsched);
295
296 /* First, just try to increase the watermark */
297 old_watermark = u->tsched_watermark;
298 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
299 fix_tsched_watermark(u);
300
301 if (old_watermark != u->tsched_watermark) {
302 pa_log_info("Increasing wakeup watermark to %0.2f ms",
303 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
304 return;
305 }
306
307 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
308 old_min_latency = u->sink->thread_info.min_latency;
309 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
310 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
311
312 if (old_min_latency != new_min_latency) {
313 pa_log_info("Increasing minimal latency to %0.2f ms",
314 (double) new_min_latency / PA_USEC_PER_MSEC);
315
316 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
317 }
318
319 /* When we reach this we're officially fucked! */
320 }
321
322 static void decrease_watermark(struct userdata *u) {
323 size_t old_watermark;
324 pa_usec_t now;
325
326 pa_assert(u);
327 pa_assert(u->use_tsched);
328
329 now = pa_rtclock_now();
330
331 if (u->watermark_dec_not_before <= 0)
332 goto restart;
333
334 if (u->watermark_dec_not_before > now)
335 return;
336
337 old_watermark = u->tsched_watermark;
338
339 if (u->tsched_watermark < u->watermark_dec_step)
340 u->tsched_watermark = u->tsched_watermark / 2;
341 else
342 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
343
344 fix_tsched_watermark(u);
345
346 if (old_watermark != u->tsched_watermark)
347 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
348 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
349
350 /* We don't change the latency range */
351
352 restart:
353 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
354 }
355
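/* Split the configured latency into the time we may sleep and the time
 * reserved for processing: sleep_usec = latency - watermark,
 * process_usec = watermark. E.g. with a 2s buffer and a 20ms watermark
 * we sleep for 1.98s and keep 20ms as processing headroom. */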
356 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
357 pa_usec_t usec, wm;
358
359 pa_assert(sleep_usec);
360 pa_assert(process_usec);
361
362 pa_assert(u);
363 pa_assert(u->use_tsched);
364
365 usec = pa_sink_get_requested_latency_within_thread(u->sink);
366
367 if (usec == (pa_usec_t) -1)
368 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
369
370 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
371
372 if (wm > usec)
373 wm = usec/2;
374
375 *sleep_usec = usec - wm;
376 *process_usec = wm;
377
378 #ifdef DEBUG_TIMING
379 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
380 (unsigned long) (usec / PA_USEC_PER_MSEC),
381 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
382 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
383 #endif
384 }
385
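/* Central error recovery for PCM calls: -EPIPE signals an underrun,
 * -ESTRPIPE a system suspend; snd_pcm_recover() tries to get the PCM
 * back into a usable state. On success we restart the "first
 * iteration" logic so the stream is started cleanly again. */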
386 static int try_recover(struct userdata *u, const char *call, int err) {
387 pa_assert(u);
388 pa_assert(call);
389 pa_assert(err < 0);
390
391 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
392
393 pa_assert(err != -EAGAIN);
394
395 if (err == -EPIPE)
396 pa_log_debug("%s: Buffer underrun!", call);
397
398 if (err == -ESTRPIPE)
399 pa_log_debug("%s: System suspended!", call);
400
401 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
402 pa_log("%s: %s", call, pa_alsa_strerror(err));
403 return -1;
404 }
405
406 u->first = TRUE;
407 u->since_start = 0;
408 return 0;
409 }
410
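/* Given how many bytes ALSA reports as writable, compute how much
 * audio is still queued for playback. This value drives the adaptive
 * watermark: dipping below watermark_inc_threshold raises it, staying
 * above watermark_dec_threshold (on timer wakeups) lowers it. */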
411 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
412 size_t left_to_play;
413
414 /* We use <= instead of < for this check here because an underrun
415 * only happens after the last sample was processed, not when it
416 * is merely removed from the buffer. This is particularly important
417 * when block transfer is used. */
418
419 if (n_bytes <= u->hwbuf_size)
420 left_to_play = u->hwbuf_size - n_bytes;
421 else {
422
423 /* We got a dropout. What a mess! */
424 left_to_play = 0;
425
426 #ifdef DEBUG_TIMING
427 PA_DEBUG_TRAP;
428 #endif
429
430 if (!u->first && !u->after_rewind)
431 if (pa_log_ratelimit())
432 pa_log_info("Underrun!");
433 }
434
435 #ifdef DEBUG_TIMING
436 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
437 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
438 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
439 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
440 #endif
441
442 if (u->use_tsched) {
443 pa_bool_t reset_not_before = TRUE;
444
445 if (!u->first && !u->after_rewind) {
446 if (left_to_play < u->watermark_inc_threshold)
447 increase_watermark(u);
448 else if (left_to_play > u->watermark_dec_threshold) {
449 reset_not_before = FALSE;
450
451 /* We decrease the watermark only if we have actually
452 * been woken up by a timeout. If something else woke
453 * us up it's too easy to fulfill the deadlines... */
454
455 if (on_timeout)
456 decrease_watermark(u);
457 }
458 }
459
460 if (reset_not_before)
461 u->watermark_dec_not_before = 0;
462 }
463
464 return left_to_play;
465 }
466
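/* Fill the hardware buffer using the zero-copy mmap interface: map a
 * region of the hardware buffer with snd_pcm_mmap_begin(), render
 * directly into it via pa_sink_render_into_full(), and hand it to the
 * hardware with snd_pcm_mmap_commit(). Returns 1 if any data was
 * written, 0 if not, negative on error. */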
467 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
468 pa_bool_t work_done = FALSE;
469 pa_usec_t max_sleep_usec = 0, process_usec = 0;
470 size_t left_to_play;
471 unsigned j = 0;
472
473 pa_assert(u);
474 pa_sink_assert_ref(u->sink);
475
476 if (u->use_tsched)
477 hw_sleep_time(u, &max_sleep_usec, &process_usec);
478
479 for (;;) {
480 snd_pcm_sframes_t n;
481 size_t n_bytes;
482 int r;
483 pa_bool_t after_avail = TRUE;
484
485 /* First we determine how many samples are missing to fill the
486 * buffer up to 100% */
487
488 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
489
490 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
491 continue;
492
493 return r;
494 }
495
496 n_bytes = (size_t) n * u->frame_size;
497
498 #ifdef DEBUG_TIMING
499 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
500 #endif
501
502 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
503 on_timeout = FALSE;
504
505 if (u->use_tsched)
506
507 /* We won't fill up the playback buffer before at least
508 * half the sleep time is over because otherwise we might
509 * ask for more data from the clients than they expect. We
510 * need to guarantee that clients only have to keep around
511 * a single hw buffer length. */
512
513 if (!polled &&
514 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
515 #ifdef DEBUG_TIMING
516 pa_log_debug("Not filling up, because too early.");
517 #endif
518 break;
519 }
520
521 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
522
523 if (polled)
524 PA_ONCE_BEGIN {
525 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
526 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
527 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
528 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
529 pa_strnull(dn));
530 pa_xfree(dn);
531 } PA_ONCE_END;
532
533 #ifdef DEBUG_TIMING
534 pa_log_debug("Not filling up, because not necessary.");
535 #endif
536 break;
537 }
538
539
540 if (++j > 10) {
541 #ifdef DEBUG_TIMING
542 pa_log_debug("Not filling up, because already too many iterations.");
543 #endif
544
545 break;
546 }
547
548 n_bytes -= u->hwbuf_unused;
549 polled = FALSE;
550
551 #ifdef DEBUG_TIMING
552 pa_log_debug("Filling up");
553 #endif
554
555 for (;;) {
556 pa_memchunk chunk;
557 void *p;
558 int err;
559 const snd_pcm_channel_area_t *areas;
560 snd_pcm_uframes_t offset, frames;
561 snd_pcm_sframes_t sframes;
562
563 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
564 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
565
566 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
567
568 if (!after_avail && err == -EAGAIN)
569 break;
570
571 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
572 continue;
573
574 return r;
575 }
576
577 /* Make sure that if these memblocks need to be copied they will fit into one slot */
578 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
579 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
580
581 if (!after_avail && frames == 0)
582 break;
583
584 pa_assert(frames > 0);
585 after_avail = FALSE;
586
587 /* Check that these are multiples of 8 bits */
588 pa_assert((areas[0].first & 7) == 0);
589 pa_assert((areas[0].step & 7) == 0);
590
591 /* We assume a single interleaved memory buffer */
592 pa_assert((areas[0].first >> 3) == 0);
593 pa_assert((areas[0].step >> 3) == u->frame_size);
594
595 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
596
597 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
598 chunk.length = pa_memblock_get_length(chunk.memblock);
599 chunk.index = 0;
600
601 pa_sink_render_into_full(u->sink, &chunk);
602 pa_memblock_unref_fixed(chunk.memblock);
603
604 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
605
606 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
607 continue;
608
609 return r;
610 }
611
612 work_done = TRUE;
613
614 u->write_count += frames * u->frame_size;
615 u->since_start += frames * u->frame_size;
616
617 #ifdef DEBUG_TIMING
618 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
619 #endif
620
621 if ((size_t) frames * u->frame_size >= n_bytes)
622 break;
623
624 n_bytes -= (size_t) frames * u->frame_size;
625 }
626 }
627
628 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
629
630 if (*sleep_usec > process_usec)
631 *sleep_usec -= process_usec;
632 else
633 *sleep_usec = 0;
634
635 return work_done ? 1 : 0;
636 }
637
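/* Fallback write path for devices that cannot do mmap: render into an
 * intermediate memchunk and copy it to the device with
 * snd_pcm_writei(). Same return convention as mmap_write(). */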
638 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
639 pa_bool_t work_done = FALSE;
640 pa_usec_t max_sleep_usec = 0, process_usec = 0;
641 size_t left_to_play;
642 unsigned j = 0;
643
644 pa_assert(u);
645 pa_sink_assert_ref(u->sink);
646
647 if (u->use_tsched)
648 hw_sleep_time(u, &max_sleep_usec, &process_usec);
649
650 for (;;) {
651 snd_pcm_sframes_t n;
652 size_t n_bytes;
653 int r;
654
655 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
656
657 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
658 continue;
659
660 return r;
661 }
662
663 n_bytes = (size_t) n * u->frame_size;
664 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
665 on_timeout = FALSE;
666
667 if (u->use_tsched)
668
669 /* We won't fill up the playback buffer before at least
670 * half the sleep time is over because otherwise we might
671 * ask for more data from the clients than they expect. We
672 * need to guarantee that clients only have to keep around
673 * a single hw buffer length. */
674
675 if (!polled &&
676 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
677 break;
678
679 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
680
681 if (polled)
682 PA_ONCE_BEGIN {
683 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
684 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
685 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
686 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
687 pa_strnull(dn));
688 pa_xfree(dn);
689 } PA_ONCE_END;
690
691 break;
692 }
693
694 if (++j > 10) {
695 #ifdef DEBUG_TIMING
696 pa_log_debug("Not filling up, because already too many iterations.");
697 #endif
698
699 break;
700 }
701
702 n_bytes -= u->hwbuf_unused;
703 polled = FALSE;
704
705 for (;;) {
706 snd_pcm_sframes_t frames;
707 void *p;
708 pa_bool_t after_avail = TRUE;
709
710 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
711
712 if (u->memchunk.length <= 0)
713 pa_sink_render(u->sink, n_bytes, &u->memchunk);
714
715 pa_assert(u->memchunk.length > 0);
716
717 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
718
719 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
720 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
721
722 p = pa_memblock_acquire(u->memchunk.memblock);
723 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
724 pa_memblock_release(u->memchunk.memblock);
725
726 if (PA_UNLIKELY(frames < 0)) {
727
728 if (!after_avail && (int) frames == -EAGAIN)
729 break;
730
731 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
732 continue;
733
734 return r;
735 }
736
737 if (!after_avail && frames == 0)
738 break;
739
740 pa_assert(frames > 0);
741 after_avail = FALSE;
742
743 u->memchunk.index += (size_t) frames * u->frame_size;
744 u->memchunk.length -= (size_t) frames * u->frame_size;
745
746 if (u->memchunk.length <= 0) {
747 pa_memblock_unref(u->memchunk.memblock);
748 pa_memchunk_reset(&u->memchunk);
749 }
750
751 work_done = TRUE;
752
753 u->write_count += frames * u->frame_size;
754 u->since_start += frames * u->frame_size;
755
756 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
757
758 if ((size_t) frames * u->frame_size >= n_bytes)
759 break;
760
761 n_bytes -= (size_t) frames * u->frame_size;
762 }
763 }
764
765 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
766
767 if (*sleep_usec > process_usec)
768 *sleep_usec -= process_usec;
769 else
770 *sleep_usec = 0;
771
772 return work_done ? 1 : 0;
773 }
774
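/* Feed the time smoother with a fresh (system time, playback position)
 * sample pair. The playback position is derived as write_count minus
 * the reported delay converted to bytes, i.e. how much audio the
 * hardware has actually played so far. Updates are rate-limited by
 * smoother_interval, which backs off exponentially up to
 * SMOOTHER_MAX_INTERVAL. */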
775 static void update_smoother(struct userdata *u) {
776 snd_pcm_sframes_t delay = 0;
777 int64_t position;
778 int err;
779 pa_usec_t now1 = 0, now2;
780 snd_pcm_status_t *status;
781
782 snd_pcm_status_alloca(&status);
783
784 pa_assert(u);
785 pa_assert(u->pcm_handle);
786
787 /* Let's update the time smoother */
788
789 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
790 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
791 return;
792 }
793
794 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
795 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
796 else {
797 snd_htimestamp_t htstamp = { 0, 0 };
798 snd_pcm_status_get_htstamp(status, &htstamp);
799 now1 = pa_timespec_load(&htstamp);
800 }
801
802 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
803 if (now1 <= 0)
804 now1 = pa_rtclock_now();
805
806 /* check if the time since the last update is bigger than the interval */
807 if (u->last_smoother_update > 0)
808 if (u->last_smoother_update + u->smoother_interval > now1)
809 return;
810
811 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
812
813 if (PA_UNLIKELY(position < 0))
814 position = 0;
815
816 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
817
818 pa_smoother_put(u->smoother, now1, now2);
819
820 u->last_smoother_update = now1;
821 /* exponentially increase the update interval up to the MAX limit */
822 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
823 }
824
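/* Latency estimate: bytes written so far (as time) minus the smoothed
 * playback position, plus whatever is still sitting in our partial
 * memchunk waiting to be written. */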
825 static pa_usec_t sink_get_latency(struct userdata *u) {
826 pa_usec_t r;
827 int64_t delay;
828 pa_usec_t now1, now2;
829
830 pa_assert(u);
831
832 now1 = pa_rtclock_now();
833 now2 = pa_smoother_get(u->smoother, now1);
834
835 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
836
837 r = delay >= 0 ? (pa_usec_t) delay : 0;
838
839 if (u->memchunk.memblock)
840 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
841
842 return r;
843 }
844
845 static int build_pollfd(struct userdata *u) {
846 pa_assert(u);
847 pa_assert(u->pcm_handle);
848
849 if (u->alsa_rtpoll_item)
850 pa_rtpoll_item_free(u->alsa_rtpoll_item);
851
852 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
853 return -1;
854
855 return 0;
856 }
857
858 /* Called from IO context */
859 static int suspend(struct userdata *u) {
860 pa_assert(u);
861 pa_assert(u->pcm_handle);
862
863 pa_smoother_pause(u->smoother, pa_rtclock_now());
864
865 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
866 * take awfully long with our long buffer sizes today. */
867 snd_pcm_close(u->pcm_handle);
868 u->pcm_handle = NULL;
869
870 if (u->alsa_rtpoll_item) {
871 pa_rtpoll_item_free(u->alsa_rtpoll_item);
872 u->alsa_rtpoll_item = NULL;
873 }
874
875 pa_log_info("Device suspended...");
876
877 return 0;
878 }
879
880 /* Called from IO context */
881 static int update_sw_params(struct userdata *u) {
882 snd_pcm_uframes_t avail_min;
883 int err;
884
885 pa_assert(u);
886
887 /* Use the full buffer if no one asked us for anything specific */
888 u->hwbuf_unused = 0;
889
890 if (u->use_tsched) {
891 pa_usec_t latency;
892
893 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
894 size_t b;
895
896 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
897
898 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
899
900 /* We need at least one sample in our buffer */
901
902 if (PA_UNLIKELY(b < u->frame_size))
903 b = u->frame_size;
904
905 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
906 }
907
908 fix_min_sleep_wakeup(u);
909 fix_tsched_watermark(u);
910 }
911
912 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
913
914 /* We need at least one frame in the used part of the buffer */
915 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
916
917 if (u->use_tsched) {
918 pa_usec_t sleep_usec, process_usec;
919
920 hw_sleep_time(u, &sleep_usec, &process_usec);
921 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
922 }
923
924 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
925
926 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
927 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
928 return err;
929 }
930
931 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
932
933 return 0;
934 }
935
936 /* Called from IO context */
937 static int unsuspend(struct userdata *u) {
938 pa_sample_spec ss;
939 int err;
940 pa_bool_t b, d;
941 unsigned nfrags;
942 snd_pcm_uframes_t period_size;
943
944 pa_assert(u);
945 pa_assert(!u->pcm_handle);
946
947 pa_log_info("Trying resume...");
948
949 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
950 /*SND_PCM_NONBLOCK|*/
951 SND_PCM_NO_AUTO_RESAMPLE|
952 SND_PCM_NO_AUTO_CHANNELS|
953 SND_PCM_NO_AUTO_FORMAT)) < 0) {
954 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
955 goto fail;
956 }
957
958 ss = u->sink->sample_spec;
959 nfrags = u->nfragments;
960 period_size = u->fragment_size / u->frame_size;
961 b = u->use_mmap;
962 d = u->use_tsched;
963
964 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
965 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
966 goto fail;
967 }
968
969 if (b != u->use_mmap || d != u->use_tsched) {
970 pa_log_warn("Resume failed, couldn't get original access mode.");
971 goto fail;
972 }
973
974 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
975 pa_log_warn("Resume failed, couldn't restore original sample settings.");
976 goto fail;
977 }
978
979 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
980 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
981 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
982 (unsigned long) nfrags, period_size * u->frame_size);
983 goto fail;
984 }
985
986 if (update_sw_params(u) < 0)
987 goto fail;
988
989 if (build_pollfd(u) < 0)
990 goto fail;
991
992 u->write_count = 0;
993 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
994 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
995 u->last_smoother_update = 0;
996
997 u->first = TRUE;
998 u->since_start = 0;
999
1000 pa_log_info("Resumed successfully...");
1001
1002 return 0;
1003
1004 fail:
1005 if (u->pcm_handle) {
1006 snd_pcm_close(u->pcm_handle);
1007 u->pcm_handle = NULL;
1008 }
1009
1010 return -1;
1011 }
1012
1013 /* Called from IO context */
1014 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1015 struct userdata *u = PA_SINK(o)->userdata;
1016
1017 switch (code) {
1018
1019 case PA_SINK_MESSAGE_GET_LATENCY: {
1020 pa_usec_t r = 0;
1021
1022 if (u->pcm_handle)
1023 r = sink_get_latency(u);
1024
1025 *((pa_usec_t*) data) = r;
1026
1027 return 0;
1028 }
1029
1030 case PA_SINK_MESSAGE_SET_STATE:
1031
1032 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1033
1034 case PA_SINK_SUSPENDED:
1035 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1036
1037 if (suspend(u) < 0)
1038 return -1;
1039
1040 break;
1041
1042 case PA_SINK_IDLE:
1043 case PA_SINK_RUNNING:
1044
1045 if (u->sink->thread_info.state == PA_SINK_INIT) {
1046 if (build_pollfd(u) < 0)
1047 return -1;
1048 }
1049
1050 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1051 if (unsuspend(u) < 0)
1052 return -1;
1053 }
1054
1055 break;
1056
1057 case PA_SINK_UNLINKED:
1058 case PA_SINK_INIT:
1059 case PA_SINK_INVALID_STATE:
1060 ;
1061 }
1062
1063 break;
1064 }
1065
1066 return pa_sink_process_msg(o, code, data, offset, chunk);
1067 }
1068
1069 /* Called from main context */
1070 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1071 pa_sink_state_t old_state;
1072 struct userdata *u;
1073
1074 pa_sink_assert_ref(s);
1075 pa_assert_se(u = s->userdata);
1076
1077 old_state = pa_sink_get_state(u->sink);
1078
1079 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1080 reserve_done(u);
1081 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1082 if (reserve_init(u, u->device_name) < 0)
1083 return -1;
1084
1085 return 0;
1086 }
1087
1088 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1089 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1090
1091 pa_assert(u);
1092 pa_assert(u->mixer_handle);
1093
1094 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1095 return 0;
1096
1097 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1098 pa_sink_get_volume(u->sink, TRUE);
1099 pa_sink_get_mute(u->sink, TRUE);
1100 }
1101
1102 return 0;
1103 }
1104
1105 static void sink_get_volume_cb(pa_sink *s) {
1106 struct userdata *u = s->userdata;
1107 pa_cvolume r;
1108 char t[PA_CVOLUME_SNPRINT_MAX];
1109
1110 pa_assert(u);
1111 pa_assert(u->mixer_path);
1112 pa_assert(u->mixer_handle);
1113
1114 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1115 return;
1116
1117 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1118 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1119
1120 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1121
1122 if (pa_cvolume_equal(&u->hardware_volume, &r))
1123 return;
1124
1125 s->real_volume = u->hardware_volume = r;
1126
1127 /* Hmm, so the hardware volume changed, let's reset our software volume */
1128 if (u->mixer_path->has_dB)
1129 pa_sink_set_soft_volume(s, NULL);
1130 }
1131
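/* Apply the requested volume as far as possible in hardware and make
 * up the remainder in software. If the residual software correction is
 * within VOLUME_ACCURACY of unity we skip it entirely to save CPU. */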
1132 static void sink_set_volume_cb(pa_sink *s) {
1133 struct userdata *u = s->userdata;
1134 pa_cvolume r;
1135 char t[PA_CVOLUME_SNPRINT_MAX];
1136
1137 pa_assert(u);
1138 pa_assert(u->mixer_path);
1139 pa_assert(u->mixer_handle);
1140
1141 /* Shift up by the base volume */
1142 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1143
1144 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1145 return;
1146
1147 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1148 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1149
1150 u->hardware_volume = r;
1151
1152 if (u->mixer_path->has_dB) {
1153 pa_cvolume new_soft_volume;
1154 pa_bool_t accurate_enough;
1155
1156 /* Match exactly what the user requested by software */
1157 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1158
1159 /* If the adjustment to do in software is only minimal we
1160 * can skip it. That saves us CPU at the expense of a bit of
1161 * accuracy */
1162 accurate_enough =
1163 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1164 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1165
1166 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1167 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1168 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1169 pa_yes_no(accurate_enough));
1170
1171 if (!accurate_enough)
1172 s->soft_volume = new_soft_volume;
1173
1174 } else {
1175 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1176
1177 /* We can't match exactly what the user requested, hence let's
1178 * at least tell the user about it */
1179
1180 s->real_volume = r;
1181 }
1182 }
1183
1184 static void sink_get_mute_cb(pa_sink *s) {
1185 struct userdata *u = s->userdata;
1186 pa_bool_t b;
1187
1188 pa_assert(u);
1189 pa_assert(u->mixer_path);
1190 pa_assert(u->mixer_handle);
1191
1192 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1193 return;
1194
1195 s->muted = b;
1196 }
1197
1198 static void sink_set_mute_cb(pa_sink *s) {
1199 struct userdata *u = s->userdata;
1200
1201 pa_assert(u);
1202 pa_assert(u->mixer_path);
1203 pa_assert(u->mixer_handle);
1204
1205 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1206 }
1207
1208 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1209 struct userdata *u = s->userdata;
1210 pa_alsa_port_data *data;
1211
1212 pa_assert(u);
1213 pa_assert(p);
1214 pa_assert(u->mixer_handle);
1215
1216 data = PA_DEVICE_PORT_DATA(p);
1217
1218 pa_assert_se(u->mixer_path = data->path);
1219 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1220
1221 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1222 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1223 s->n_volume_steps = PA_VOLUME_NORM+1;
1224
1225 if (u->mixer_path->max_dB > 0.0)
1226 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1227 else
1228 pa_log_info("No particular base volume set, fixing to 0 dB");
1229 } else {
1230 s->base_volume = PA_VOLUME_NORM;
1231 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1232 }
1233
1234 if (data->setting)
1235 pa_alsa_setting_select(data->setting, u->mixer_handle);
1236
1237 if (s->set_mute)
1238 s->set_mute(s);
1239 if (s->set_volume)
1240 s->set_volume(s);
1241
1242 return 0;
1243 }
1244
1245 static void sink_update_requested_latency_cb(pa_sink *s) {
1246 struct userdata *u = s->userdata;
1247 size_t before;
1248 pa_assert(u);
1249
1250 if (!u->pcm_handle)
1251 return;
1252
1253 before = u->hwbuf_unused;
1254 update_sw_params(u);
1255
1256 /* Let's check whether we now use only a smaller part of the
1257 buffer than before. If so, we need to make sure that subsequent
1258 rewinds are relative to the new maximum fill level and not to the
1259 current fill level. Thus, let's do a full rewind once, to clear
1260 things up. */
1261
1262 if (u->hwbuf_unused > before) {
1263 pa_log_debug("Requesting rewind due to latency change.");
1264 pa_sink_request_rewind(s, (size_t) -1);
1265 }
1266 }
1267
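/* Take back samples that were already handed to ALSA but not yet
 * played, so a newly arriving stream can be mixed in with low latency.
 * We leave at least tsched_watermark bytes of queued audio in place as
 * a safety margin against underruns. */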
1268 static int process_rewind(struct userdata *u) {
1269 snd_pcm_sframes_t unused;
1270 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1271 pa_assert(u);
1272
1273 /* Figure out how much we shall rewind and reset the counter */
1274 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1275
1276 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1277
1278 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1279 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1280 return -1;
1281 }
1282
1283 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1284
1285 if (u->hwbuf_size > unused_nbytes)
1286 limit_nbytes = u->hwbuf_size - unused_nbytes;
1287 else
1288 limit_nbytes = 0;
1289
1290 if (rewind_nbytes > limit_nbytes)
1291 rewind_nbytes = limit_nbytes;
1292
1293 if (rewind_nbytes > 0) {
1294 snd_pcm_sframes_t in_frames, out_frames;
1295
1296 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1297
1298 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1299 pa_log_debug("before: %lu", (unsigned long) in_frames);
1300 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1301 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1302 if (try_recover(u, "process_rewind", out_frames) < 0)
1303 return -1;
1304 out_frames = 0;
1305 }
1306
1307 pa_log_debug("after: %lu", (unsigned long) out_frames);
1308
1309 rewind_nbytes = (size_t) out_frames * u->frame_size;
1310
1311 if (rewind_nbytes <= 0)
1312 pa_log_info("Tried rewind, but was apparently not possible.");
1313 else {
1314 u->write_count -= rewind_nbytes;
1315 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1316 pa_sink_process_rewind(u->sink, rewind_nbytes);
1317
1318 u->after_rewind = TRUE;
1319 return 0;
1320 }
1321 } else
1322 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1323
1324 pa_sink_process_rewind(u->sink, 0);
1325 return 0;
1326 }
1327
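/* Real-time I/O thread: render audio and write it to the device,
 * program the wakeup timer (in tsched mode) from the smoother's idea
 * of the sound-card clock, then sleep in pa_rtpoll_run() until either
 * the timer fires or ALSA signals POLLOUT. */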
1328 static void thread_func(void *userdata) {
1329 struct userdata *u = userdata;
1330 unsigned short revents = 0;
1331
1332 pa_assert(u);
1333
1334 pa_log_debug("Thread starting up");
1335
1336 if (u->core->realtime_scheduling)
1337 pa_make_realtime(u->core->realtime_priority);
1338
1339 pa_thread_mq_install(&u->thread_mq);
1340
1341 for (;;) {
1342 int ret;
1343
1344 #ifdef DEBUG_TIMING
1345 pa_log_debug("Loop");
1346 #endif
1347
1348 /* Render some data and write it to the dsp */
1349 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1350 int work_done;
1351 pa_usec_t sleep_usec = 0;
1352 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1353
1354 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1355 if (process_rewind(u) < 0)
1356 goto fail;
1357
1358 if (u->use_mmap)
1359 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1360 else
1361 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1362
1363 if (work_done < 0)
1364 goto fail;
1365
1366 /* pa_log_debug("work_done = %i", work_done); */
1367
1368 if (work_done) {
1369
1370 if (u->first) {
1371 pa_log_info("Starting playback.");
1372 snd_pcm_start(u->pcm_handle);
1373
1374 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1375 }
1376
1377 update_smoother(u);
1378 }
1379
1380 if (u->use_tsched) {
1381 pa_usec_t cusec;
1382
1383 if (u->since_start <= u->hwbuf_size) {
1384
1385 /* USB devices on ALSA seem to hit a buffer
1386 * underrun during the first iterations much
1387 * quicker than we calculate here, probably due to
1388 * the transport latency. To accommodate that,
1389 * we artificially decrease the sleep time until
1390 * we have filled the buffer at least once
1391 * completely. */
1392
1393 if (pa_log_ratelimit())
1394 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1395 sleep_usec /= 2;
1396 }
1397
1398 /* OK, the playback buffer is now full, let's
1399 * calculate when to wake up next */
1400 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1401
1402 /* Convert from the sound card time domain to the
1403 * system time domain */
1404 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1405
1406 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1407
1408 /* We don't trust the conversion, so we wake up whatever comes first */
1409 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1410 }
1411
1412 u->first = FALSE;
1413 u->after_rewind = FALSE;
1414
1415 } else if (u->use_tsched)
1416
1417 /* OK, we're in an invalid state, let's disable our timers */
1418 pa_rtpoll_set_timer_disabled(u->rtpoll);
1419
1420 /* Hmm, nothing to do. Let's sleep */
1421 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1422 goto fail;
1423
1424 if (ret == 0)
1425 goto finish;
1426
1427 /* Tell ALSA about this and process its response */
1428 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1429 struct pollfd *pollfd;
1430 int err;
1431 unsigned n;
1432
1433 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1434
1435 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1436 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1437 goto fail;
1438 }
1439
1440 if (revents & ~POLLOUT) {
1441 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1442 goto fail;
1443
1444 u->first = TRUE;
1445 u->since_start = 0;
1446 } else if (revents && u->use_tsched && pa_log_ratelimit())
1447 pa_log_debug("Wakeup from ALSA!");
1448
1449 } else
1450 revents = 0;
1451 }
1452
1453 fail:
1454 /* If this was not a regular exit from the loop we have to continue
1455 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1456 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1457 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1458
1459 finish:
1460 pa_log_debug("Thread shutting down");
1461 }
1462
1463 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1464 const char *n;
1465 char *t;
1466
1467 pa_assert(data);
1468 pa_assert(ma);
1469 pa_assert(device_name);
1470
1471 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1472 pa_sink_new_data_set_name(data, n);
1473 data->namereg_fail = TRUE;
1474 return;
1475 }
1476
1477 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1478 data->namereg_fail = TRUE;
1479 else {
1480 n = device_id ? device_id : device_name;
1481 data->namereg_fail = FALSE;
1482 }
1483
1484 if (mapping)
1485 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1486 else
1487 t = pa_sprintf_malloc("alsa_output.%s", n);
1488
1489 pa_sink_new_data_set_name(data, t);
1490 pa_xfree(t);
1491 }
1492
1493 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1494
1495 if (!mapping && !element)
1496 return;
1497
1498 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1499 pa_log_info("Failed to find a working mixer device.");
1500 return;
1501 }
1502
1503 if (element) {
1504
1505 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1506 goto fail;
1507
1508 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1509 goto fail;
1510
1511 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1512 pa_alsa_path_dump(u->mixer_path);
1513 } else {
1514
1515 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1516 goto fail;
1517
1518 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1519
1520 pa_log_debug("Probed mixer paths:");
1521 pa_alsa_path_set_dump(u->mixer_path_set);
1522 }
1523
1524 return;
1525
1526 fail:
1527
1528 if (u->mixer_path_set) {
1529 pa_alsa_path_set_free(u->mixer_path_set);
1530 u->mixer_path_set = NULL;
1531 } else if (u->mixer_path) {
1532 pa_alsa_path_free(u->mixer_path);
1533 u->mixer_path = NULL;
1534 }
1535
1536 if (u->mixer_handle) {
1537 snd_mixer_close(u->mixer_handle);
1538 u->mixer_handle = NULL;
1539 }
1540 }
1541
1542 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1543 pa_assert(u);
1544
1545 if (!u->mixer_handle)
1546 return 0;
1547
1548 if (u->sink->active_port) {
1549 pa_alsa_port_data *data;
1550
1551 /* We have a list of supported paths, so let's activate the
1552 * one that has been chosen as active */
1553
1554 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1555 u->mixer_path = data->path;
1556
1557 pa_alsa_path_select(data->path, u->mixer_handle);
1558
1559 if (data->setting)
1560 pa_alsa_setting_select(data->setting, u->mixer_handle);
1561
1562 } else {
1563
1564 if (!u->mixer_path && u->mixer_path_set)
1565 u->mixer_path = u->mixer_path_set->paths;
1566
1567 if (u->mixer_path) {
1568 /* Hmm, we have only a single path, so let's activate it */
1569
1570 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1571
1572 if (u->mixer_path->settings)
1573 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1574 } else
1575 return 0;
1576 }
1577
1578 if (!u->mixer_path->has_volume)
1579 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1580 else {
1581
1582 if (u->mixer_path->has_dB) {
1583 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1584
1585 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1586 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1587
1588 if (u->mixer_path->max_dB > 0.0)
1589 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1590 else
1591 pa_log_info("No particular base volume set, fixing to 0 dB");
1592
1593 } else {
1594 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1595 u->sink->base_volume = PA_VOLUME_NORM;
1596 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1597 }
1598
1599 u->sink->get_volume = sink_get_volume_cb;
1600 u->sink->set_volume = sink_set_volume_cb;
1601
1602 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1603 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1604 }
1605
1606 if (!u->mixer_path->has_mute) {
1607 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1608 } else {
1609 u->sink->get_mute = sink_get_mute_cb;
1610 u->sink->set_mute = sink_set_mute_cb;
1611 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1612 pa_log_info("Using hardware mute control.");
1613 }
1614
1615 u->mixer_fdl = pa_alsa_fdlist_new();
1616
1617 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1618 pa_log("Failed to initialize file descriptor monitoring");
1619 return -1;
1620 }
1621
1622 if (u->mixer_path_set)
1623 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1624 else
1625 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1626
1627 return 0;
1628 }
1629
1630 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1631
1632 struct userdata *u = NULL;
1633 const char *dev_id = NULL;
1634 pa_sample_spec ss, requested_ss;
1635 pa_channel_map map;
1636 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1637 snd_pcm_uframes_t period_frames, tsched_frames;
1638 size_t frame_size;
1639 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1640 pa_sink_new_data data;
1641 pa_alsa_profile_set *profile_set = NULL;
1642
1643 pa_assert(m);
1644 pa_assert(ma);
1645
1646 ss = m->core->default_sample_spec;
1647 map = m->core->default_channel_map;
1648 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1649 pa_log("Failed to parse sample specification and channel map");
1650 goto fail;
1651 }
1652
1653 requested_ss = ss;
1654 frame_size = pa_frame_size(&ss);
1655
1656 nfrags = m->core->default_n_fragments;
1657 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1658 if (frag_size <= 0)
1659 frag_size = (uint32_t) frame_size;
1660 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1661 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1662
1663 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1664 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1665 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1666 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1667 pa_log("Failed to parse buffer metrics");
1668 goto fail;
1669 }
1670
1671 hwbuf_size = frag_size * nfrags;
1672 period_frames = frag_size/frame_size;
1673 tsched_frames = tsched_size/frame_size;
1674
1675 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1676 pa_log("Failed to parse mmap argument.");
1677 goto fail;
1678 }
1679
1680 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1681 pa_log("Failed to parse tsched argument.");
1682 goto fail;
1683 }
1684
1685 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1686 pa_log("Failed to parse ignore_dB argument.");
1687 goto fail;
1688 }
1689
1690 if (use_tsched && !pa_rtclock_hrtimer()) {
1691 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1692 use_tsched = FALSE;
1693 }
1694
1695 u = pa_xnew0(struct userdata, 1);
1696 u->core = m->core;
1697 u->module = m;
1698 u->use_mmap = use_mmap;
1699 u->use_tsched = use_tsched;
1700 u->first = TRUE;
1701 u->rtpoll = pa_rtpoll_new();
1702 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1703
1704 u->smoother = pa_smoother_new(
1705 DEFAULT_TSCHED_BUFFER_USEC*2,
1706 DEFAULT_TSCHED_BUFFER_USEC*2,
1707 TRUE,
1708 TRUE,
1709 5,
1710 pa_rtclock_now(),
1711 TRUE);
1712 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1713
1714 dev_id = pa_modargs_get_value(
1715 ma, "device_id",
1716 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1717
1718 if (reserve_init(u, dev_id) < 0)
1719 goto fail;
1720
1721 if (reserve_monitor_init(u, dev_id) < 0)
1722 goto fail;
1723
1724 b = use_mmap;
1725 d = use_tsched;
1726
1727 if (mapping) {
1728
1729 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1730 pa_log("device_id= not set");
1731 goto fail;
1732 }
1733
1734 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1735 dev_id,
1736 &u->device_name,
1737 &ss, &map,
1738 SND_PCM_STREAM_PLAYBACK,
1739 &nfrags, &period_frames, tsched_frames,
1740 &b, &d, mapping)))
1741
1742 goto fail;
1743
1744 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1745
1746 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1747 goto fail;
1748
1749 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1750 dev_id,
1751 &u->device_name,
1752 &ss, &map,
1753 SND_PCM_STREAM_PLAYBACK,
1754 &nfrags, &period_frames, tsched_frames,
1755 &b, &d, profile_set, &mapping)))
1756
1757 goto fail;
1758
1759 } else {
1760
1761 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1762 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1763 &u->device_name,
1764 &ss, &map,
1765 SND_PCM_STREAM_PLAYBACK,
1766 &nfrags, &period_frames, tsched_frames,
1767 &b, &d, FALSE)))
1768 goto fail;
1769 }
1770
1771 pa_assert(u->device_name);
1772 pa_log_info("Successfully opened device %s.", u->device_name);
1773
1774 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1775 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1776 goto fail;
1777 }
1778
1779 if (mapping)
1780 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1781
1782 if (use_mmap && !b) {
1783 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1784 u->use_mmap = use_mmap = FALSE;
1785 }
1786
1787 if (use_tsched && (!b || !d)) {
1788 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1789 u->use_tsched = use_tsched = FALSE;
1790 }
1791
1792 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1793 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1794 u->use_tsched = use_tsched = FALSE;
1795 }
1796
1797 if (u->use_mmap)
1798 pa_log_info("Successfully enabled mmap() mode.");
1799
1800 if (u->use_tsched)
1801 pa_log_info("Successfully enabled timer-based scheduling mode.");
1802
1803 /* ALSA might tweak the sample spec, so recalculate the frame size */
1804 frame_size = pa_frame_size(&ss);
1805
1806 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1807
1808 pa_sink_new_data_init(&data);
1809 data.driver = driver;
1810 data.module = m;
1811 data.card = card;
1812 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1813 pa_sink_new_data_set_sample_spec(&data, &ss);
1814 pa_sink_new_data_set_channel_map(&data, &map);
1815
1816 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1817 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1818 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1819 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1820 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1821
1822 if (mapping) {
1823 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1824 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1825 }
1826
1827 pa_alsa_init_description(data.proplist);
1828
1829 if (u->control_device)
1830 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1831
1832 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1833 pa_log("Invalid properties");
1834 pa_sink_new_data_done(&data);
1835 goto fail;
1836 }
1837
1838 if (u->mixer_path_set)
1839 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1840
1841 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1842 pa_sink_new_data_done(&data);
1843
1844 if (!u->sink) {
1845 pa_log("Failed to create sink object");
1846 goto fail;
1847 }
1848
1849 u->sink->parent.process_msg = sink_process_msg;
1850 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1851 u->sink->set_state = sink_set_state_cb;
1852 u->sink->set_port = sink_set_port_cb;
1853 u->sink->userdata = u;
1854
1855 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1856 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1857
1858 u->frame_size = frame_size;
1859 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1860 u->nfragments = nfrags;
1861 u->hwbuf_size = u->fragment_size * nfrags;
1862 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1863
1864 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1865 nfrags, (long unsigned) u->fragment_size,
1866 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1867
1868 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1869 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1870
1871 if (u->use_tsched) {
1872 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1873
1874 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1875 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1876
1877 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1878 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1879
1880 fix_min_sleep_wakeup(u);
1881 fix_tsched_watermark(u);
1882
1883 pa_sink_set_latency_range(u->sink,
1884 0,
1885 pa_bytes_to_usec(u->hwbuf_size, &ss));
1886
1887 pa_log_info("Time scheduling watermark is %0.2fms",
1888 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1889 } else
1890 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1891
1892
1893 reserve_update(u);
1894
1895 if (update_sw_params(u) < 0)
1896 goto fail;
1897
1898 if (setup_mixer(u, ignore_dB) < 0)
1899 goto fail;
1900
1901 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1902
1903 if (!(u->thread = pa_thread_new(thread_func, u))) {
1904 pa_log("Failed to create thread.");
1905 goto fail;
1906 }
1907
1908 /* Get initial mixer settings */
1909 if (data.volume_is_set) {
1910 if (u->sink->set_volume)
1911 u->sink->set_volume(u->sink);
1912 } else {
1913 if (u->sink->get_volume)
1914 u->sink->get_volume(u->sink);
1915 }
1916
1917 if (data.muted_is_set) {
1918 if (u->sink->set_mute)
1919 u->sink->set_mute(u->sink);
1920 } else {
1921 if (u->sink->get_mute)
1922 u->sink->get_mute(u->sink);
1923 }
1924
1925 pa_sink_put(u->sink);
1926
1927 if (profile_set)
1928 pa_alsa_profile_set_free(profile_set);
1929
1930 return u->sink;
1931
1932 fail:
1933
1934 if (u)
1935 userdata_free(u);
1936
1937 if (profile_set)
1938 pa_alsa_profile_set_free(profile_set);
1939
1940 return NULL;
1941 }
1942
1943 static void userdata_free(struct userdata *u) {
1944 pa_assert(u);
1945
1946 if (u->sink)
1947 pa_sink_unlink(u->sink);
1948
1949 if (u->thread) {
1950 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1951 pa_thread_free(u->thread);
1952 }
1953
1954 pa_thread_mq_done(&u->thread_mq);
1955
1956 if (u->sink)
1957 pa_sink_unref(u->sink);
1958
1959 if (u->memchunk.memblock)
1960 pa_memblock_unref(u->memchunk.memblock);
1961
1962 if (u->alsa_rtpoll_item)
1963 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1964
1965 if (u->rtpoll)
1966 pa_rtpoll_free(u->rtpoll);
1967
1968 if (u->pcm_handle) {
1969 snd_pcm_drop(u->pcm_handle);
1970 snd_pcm_close(u->pcm_handle);
1971 }
1972
1973 if (u->mixer_fdl)
1974 pa_alsa_fdlist_free(u->mixer_fdl);
1975
1976 if (u->mixer_path_set)
1977 pa_alsa_path_set_free(u->mixer_path_set);
1978 else if (u->mixer_path)
1979 pa_alsa_path_free(u->mixer_path);
1980
1981 if (u->mixer_handle)
1982 snd_mixer_close(u->mixer_handle);
1983
1984 if (u->smoother)
1985 pa_smoother_free(u->smoother);
1986
1987 reserve_done(u);
1988 monitor_done(u);
1989
1990 pa_xfree(u->device_name);
1991 pa_xfree(u->control_device);
1992 pa_xfree(u);
1993 }
1994
1995 void pa_alsa_sink_free(pa_sink *s) {
1996 struct userdata *u;
1997
1998 pa_sink_assert_ref(s);
1999 pa_assert_se(u = s->userdata);
2000
2001 userdata_free(u);
2002 }