src/modules/alsa/alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65
66 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
67 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
68
69 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
70 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC) /* 5ms -- When everything's great, decrease watermark by this */
71 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC) /* 20s -- How long after a dropout to wait before rechecking whether things are good again */
72 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC) /* 0ms -- If the buffer level ever falls below this threshold, increase the watermark */
73 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold during the verification time, decrease the watermark */
74
75 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
76 * will increase the watermark only if we hit a real underrun. */
77
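/* For illustration with the defaults above: an underrun bumps a 20ms
 * watermark to PA_MIN(2*20ms, 20ms+10ms) = 30ms (see increase_watermark()
 * below), and after 20s in which the buffer level never dropped below
 * 100ms it is lowered again to PA_MAX(30ms/2, 30ms-5ms) = 25ms (see
 * decrease_watermark()). */
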
78 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
79 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
80
81 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
82 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
83
84 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* Don't require volume adjustments to be perfectly correct; don't necessarily extend granularity in software unless the difference exceeds this level */
85
86 struct userdata {
87 pa_core *core;
88 pa_module *module;
89 pa_sink *sink;
90
91 pa_thread *thread;
92 pa_thread_mq thread_mq;
93 pa_rtpoll *rtpoll;
94
95 snd_pcm_t *pcm_handle;
96
97 pa_alsa_fdlist *mixer_fdl;
98 snd_mixer_t *mixer_handle;
99 pa_alsa_path_set *mixer_path_set;
100 pa_alsa_path *mixer_path;
101
102 pa_cvolume hardware_volume;
103
104 size_t
105 frame_size,
106 fragment_size,
107 hwbuf_size,
108 tsched_watermark,
109 hwbuf_unused,
110 min_sleep,
111 min_wakeup,
112 watermark_inc_step,
113 watermark_dec_step,
114 watermark_inc_threshold,
115 watermark_dec_threshold;
116
117 pa_usec_t watermark_dec_not_before;
118
119 unsigned nfragments;
120 pa_memchunk memchunk;
121
122 char *device_name; /* name of the PCM device */
123 char *control_device; /* name of the control device */
124
125 pa_bool_t use_mmap:1, use_tsched:1;
126
127 pa_bool_t first, after_rewind;
128
129 pa_rtpoll_item *alsa_rtpoll_item;
130
131 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
132
133 pa_smoother *smoother;
134 uint64_t write_count;
135 uint64_t since_start;
136 pa_usec_t smoother_interval;
137 pa_usec_t last_smoother_update;
138
139 pa_reserve_wrapper *reserve;
140 pa_hook_slot *reserve_slot;
141 pa_reserve_monitor_wrapper *monitor;
142 pa_hook_slot *monitor_slot;
143 };
144
145 static void userdata_free(struct userdata *u);
146
147 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
148 pa_assert(r);
149 pa_assert(u);
150
151 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
152 return PA_HOOK_CANCEL;
153
154 return PA_HOOK_OK;
155 }
156
157 static void reserve_done(struct userdata *u) {
158 pa_assert(u);
159
160 if (u->reserve_slot) {
161 pa_hook_slot_free(u->reserve_slot);
162 u->reserve_slot = NULL;
163 }
164
165 if (u->reserve) {
166 pa_reserve_wrapper_unref(u->reserve);
167 u->reserve = NULL;
168 }
169 }
170
171 static void reserve_update(struct userdata *u) {
172 const char *description;
173 pa_assert(u);
174
175 if (!u->sink || !u->reserve)
176 return;
177
178 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
179 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
180 }
181
182 static int reserve_init(struct userdata *u, const char *dname) {
183 char *rname;
184
185 pa_assert(u);
186 pa_assert(dname);
187
188 if (u->reserve)
189 return 0;
190
191 if (pa_in_system_mode())
192 return 0;
193
194 if (!(rname = pa_alsa_get_reserve_name(dname)))
195 return 0;
196
197 /* We are resuming, try to lock the device */
198 u->reserve = pa_reserve_wrapper_get(u->core, rname);
199 pa_xfree(rname);
200
201 if (!(u->reserve))
202 return -1;
203
204 reserve_update(u);
205
206 pa_assert(!u->reserve_slot);
207 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
208
209 return 0;
210 }
211
212 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
213 pa_bool_t b;
214
215 pa_assert(w);
216 pa_assert(u);
217
218 b = PA_PTR_TO_UINT(busy) && !u->reserve;
219
220 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
221 return PA_HOOK_OK;
222 }
223
224 static void monitor_done(struct userdata *u) {
225 pa_assert(u);
226
227 if (u->monitor_slot) {
228 pa_hook_slot_free(u->monitor_slot);
229 u->monitor_slot = NULL;
230 }
231
232 if (u->monitor) {
233 pa_reserve_monitor_wrapper_unref(u->monitor);
234 u->monitor = NULL;
235 }
236 }
237
238 static int reserve_monitor_init(struct userdata *u, const char *dname) {
239 char *rname;
240
241 pa_assert(u);
242 pa_assert(dname);
243
244 if (pa_in_system_mode())
245 return 0;
246
247 if (!(rname = pa_alsa_get_reserve_name(dname)))
248 return 0;
249
250 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
251 pa_xfree(rname);
252
253 if (!(u->monitor))
254 return -1;
255
256 pa_assert(!u->monitor_slot);
257 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
258
259 return 0;
260 }
261
262 static void fix_min_sleep_wakeup(struct userdata *u) {
263 size_t max_use, max_use_2;
264
265 pa_assert(u);
266 pa_assert(u->use_tsched);
267
268 max_use = u->hwbuf_size - u->hwbuf_unused;
269 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
270
271 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
272 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
273
274 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
275 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
276 }
277
278 static void fix_tsched_watermark(struct userdata *u) {
279 size_t max_use;
280 pa_assert(u);
281 pa_assert(u->use_tsched);
282
283 max_use = u->hwbuf_size - u->hwbuf_unused;
284
285 if (u->tsched_watermark > max_use - u->min_sleep)
286 u->tsched_watermark = max_use - u->min_sleep;
287
288 if (u->tsched_watermark < u->min_wakeup)
289 u->tsched_watermark = u->min_wakeup;
290 }
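
/* Net effect of the two clamps in fix_tsched_watermark(): the watermark
 * always stays within [min_wakeup, max_use - min_sleep]. */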
291
292 static void increase_watermark(struct userdata *u) {
293 size_t old_watermark;
294 pa_usec_t old_min_latency, new_min_latency;
295
296 pa_assert(u);
297 pa_assert(u->use_tsched);
298
299 /* First, just try to increase the watermark */
300 old_watermark = u->tsched_watermark;
301 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
302 fix_tsched_watermark(u);
303
304 if (old_watermark != u->tsched_watermark) {
305 pa_log_info("Increasing wakeup watermark to %0.2f ms",
306 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
307 return;
308 }
309
310 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
311 old_min_latency = u->sink->thread_info.min_latency;
312 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
313 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
314
315 if (old_min_latency != new_min_latency) {
316 pa_log_info("Increasing minimal latency to %0.2f ms",
317 (double) new_min_latency / PA_USEC_PER_MSEC);
318
319 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
320 }
321
322 /* When we reach this we're officially fucked! */
323 }
324
325 static void decrease_watermark(struct userdata *u) {
326 size_t old_watermark;
327 pa_usec_t now;
328
329 pa_assert(u);
330 pa_assert(u->use_tsched);
331
332 now = pa_rtclock_now();
333
334 if (u->watermark_dec_not_before <= 0)
335 goto restart;
336
337 if (u->watermark_dec_not_before > now)
338 return;
339
340 old_watermark = u->tsched_watermark;
341
342 if (u->tsched_watermark < u->watermark_dec_step)
343 u->tsched_watermark = u->tsched_watermark / 2;
344 else
345 u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
346
347 fix_tsched_watermark(u);
348
349 if (old_watermark != u->tsched_watermark)
350 pa_log_info("Decreasing wakeup watermark to %0.2f ms",
351 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
352
353 /* We don't change the latency range */
354
355 restart:
356 u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
357 }
358
359 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
360 pa_usec_t usec, wm;
361
362 pa_assert(sleep_usec);
363 pa_assert(process_usec);
364
365 pa_assert(u);
366 pa_assert(u->use_tsched);
367
368 usec = pa_sink_get_requested_latency_within_thread(u->sink);
369
370 if (usec == (pa_usec_t) -1)
371 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
372
373 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
374
375 if (wm > usec)
376 wm = usec/2;
377
378 *sleep_usec = usec - wm;
379 *process_usec = wm;
380
381 #ifdef DEBUG_TIMING
382 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
383 (unsigned long) (usec / PA_USEC_PER_MSEC),
384 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
385 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
386 #endif
387 }
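
/* Example: with a requested latency of 100ms and a 20ms watermark,
 * hw_sleep_time() yields *sleep_usec = 80ms and *process_usec = 20ms:
 * we sleep until only the watermark's worth of audio is left and then
 * spend the remaining time refilling the buffer. */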
388
389 static int try_recover(struct userdata *u, const char *call, int err) {
390 pa_assert(u);
391 pa_assert(call);
392 pa_assert(err < 0);
393
394 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
395
396 pa_assert(err != -EAGAIN);
397
398 if (err == -EPIPE)
399 pa_log_debug("%s: Buffer underrun!", call);
400
401 if (err == -ESTRPIPE)
402 pa_log_debug("%s: System suspended!", call);
403
404 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
405 pa_log("%s: %s", call, pa_alsa_strerror(err));
406 return -1;
407 }
408
409 u->first = TRUE;
410 u->since_start = 0;
411 return 0;
412 }
413
414 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
415 size_t left_to_play;
416 pa_bool_t underrun = FALSE;
417
418 /* We use <= instead of < for this check here because an underrun
419 * only happens after the last sample has been processed, not when
420 * it is merely removed from the buffer. This is particularly important
421 * when block transfer is used. */
422
423 if (n_bytes <= u->hwbuf_size)
424 left_to_play = u->hwbuf_size - n_bytes;
425 else {
426
427 /* We got a dropout. What a mess! */
428 left_to_play = 0;
429 underrun = TRUE;
430
431 #ifdef DEBUG_TIMING
432 PA_DEBUG_TRAP;
433 #endif
434
435 if (!u->first && !u->after_rewind)
436 if (pa_log_ratelimit())
437 pa_log_info("Underrun!");
438 }
439
440 #ifdef DEBUG_TIMING
441 pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
442 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
443 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
444 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
445 #endif
446
447 if (u->use_tsched) {
448 pa_bool_t reset_not_before = TRUE;
449
450 if (!u->first && !u->after_rewind) {
451 if (underrun || left_to_play < u->watermark_inc_threshold)
452 increase_watermark(u);
453 else if (left_to_play > u->watermark_dec_threshold) {
454 reset_not_before = FALSE;
455
456 /* We decrease the watermark only if we have actually
457 * been woken up by a timeout. If something else woke
458 * us up it's too easy to fulfill the deadlines... */
459
460 if (on_timeout)
461 decrease_watermark(u);
462 }
463 }
464
465 if (reset_not_before)
466 u->watermark_dec_not_before = 0;
467 }
468
469 return left_to_play;
470 }
471
472 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
473 pa_bool_t work_done = FALSE;
474 pa_usec_t max_sleep_usec = 0, process_usec = 0;
475 size_t left_to_play;
476 unsigned j = 0;
477
478 pa_assert(u);
479 pa_sink_assert_ref(u->sink);
480
481 if (u->use_tsched)
482 hw_sleep_time(u, &max_sleep_usec, &process_usec);
483
484 for (;;) {
485 snd_pcm_sframes_t n;
486 size_t n_bytes;
487 int r;
488 pa_bool_t after_avail = TRUE;
489
490 /* First we determine how many samples are missing to fill the
491 * buffer up to 100% */
492
493 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
494
495 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
496 continue;
497
498 return r;
499 }
500
501 n_bytes = (size_t) n * u->frame_size;
502
503 #ifdef DEBUG_TIMING
504 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
505 #endif
506
507 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
508 on_timeout = FALSE;
509
510 if (u->use_tsched)
511
512 /* We won't fill up the playback buffer before at least
513 * half the sleep time is over because otherwise we might
514 * ask for more data from the clients than they expect. We
515 * need to guarantee that clients only have to keep around
516 * a single hw buffer length. */
517
518 if (!polled &&
519 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
520 #ifdef DEBUG_TIMING
521 pa_log_debug("Not filling up, because too early.");
522 #endif
523 break;
524 }
525
526 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
527
528 if (polled)
529 PA_ONCE_BEGIN {
530 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
531 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
532 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
533 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
534 pa_strnull(dn));
535 pa_xfree(dn);
536 } PA_ONCE_END;
537
538 #ifdef DEBUG_TIMING
539 pa_log_debug("Not filling up, because not necessary.");
540 #endif
541 break;
542 }
543
544
545 if (++j > 10) {
546 #ifdef DEBUG_TIMING
547 pa_log_debug("Not filling up, because already too many iterations.");
548 #endif
549
550 break;
551 }
552
553 n_bytes -= u->hwbuf_unused;
554 polled = FALSE;
555
556 #ifdef DEBUG_TIMING
557 pa_log_debug("Filling up");
558 #endif
559
560 for (;;) {
561 pa_memchunk chunk;
562 void *p;
563 int err;
564 const snd_pcm_channel_area_t *areas;
565 snd_pcm_uframes_t offset, frames;
566 snd_pcm_sframes_t sframes;
567
568 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
569 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
570
571 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
572
573 if (!after_avail && err == -EAGAIN)
574 break;
575
576 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
577 continue;
578
579 return r;
580 }
581
582 /* Make sure that if these memblocks need to be copied they will fit into one slot */
583 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
584 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
585
586 if (!after_avail && frames == 0)
587 break;
588
589 pa_assert(frames > 0);
590 after_avail = FALSE;
591
592 /* Check these are multiples of 8 bit */
593 pa_assert((areas[0].first & 7) == 0);
594 pa_assert((areas[0].step & 7) == 0);
595
596 /* We assume a single interleaved memory buffer */
597 pa_assert((areas[0].first >> 3) == 0);
598 pa_assert((areas[0].step >> 3) == u->frame_size);
599
600 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
601
602 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
603 chunk.length = pa_memblock_get_length(chunk.memblock);
604 chunk.index = 0;
605
606 pa_sink_render_into_full(u->sink, &chunk);
607 pa_memblock_unref_fixed(chunk.memblock);
608
609 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
610
611 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
612 continue;
613
614 return r;
615 }
616
617 work_done = TRUE;
618
619 u->write_count += frames * u->frame_size;
620 u->since_start += frames * u->frame_size;
621
622 #ifdef DEBUG_TIMING
623 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
624 #endif
625
626 if ((size_t) frames * u->frame_size >= n_bytes)
627 break;
628
629 n_bytes -= (size_t) frames * u->frame_size;
630 }
631 }
632
633 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
634
635 if (*sleep_usec > process_usec)
636 *sleep_usec -= process_usec;
637 else
638 *sleep_usec = 0;
639
640 return work_done ? 1 : 0;
641 }
642
643 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
644 pa_bool_t work_done = FALSE;
645 pa_usec_t max_sleep_usec = 0, process_usec = 0;
646 size_t left_to_play;
647 unsigned j = 0;
648
649 pa_assert(u);
650 pa_sink_assert_ref(u->sink);
651
652 if (u->use_tsched)
653 hw_sleep_time(u, &max_sleep_usec, &process_usec);
654
655 for (;;) {
656 snd_pcm_sframes_t n;
657 size_t n_bytes;
658 int r;
659
660 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
661
662 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
663 continue;
664
665 return r;
666 }
667
668 n_bytes = (size_t) n * u->frame_size;
669 left_to_play = check_left_to_play(u, n_bytes, on_timeout);
670 on_timeout = FALSE;
671
672 if (u->use_tsched)
673
674 /* We won't fill up the playback buffer before at least
675 * half the sleep time is over because otherwise we might
676 * ask for more data from the clients than they expect. We
677 * need to guarantee that clients only have to keep around
678 * a single hw buffer length. */
679
680 if (!polled &&
681 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
682 break;
683
684 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
685
686 if (polled)
687 PA_ONCE_BEGIN {
688 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
689 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
690 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
691 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
692 pa_strnull(dn));
693 pa_xfree(dn);
694 } PA_ONCE_END;
695
696 break;
697 }
698
699 if (++j > 10) {
700 #ifdef DEBUG_TIMING
701 pa_log_debug("Not filling up, because already too many iterations.");
702 #endif
703
704 break;
705 }
706
707 n_bytes -= u->hwbuf_unused;
708 polled = FALSE;
709
710 for (;;) {
711 snd_pcm_sframes_t frames;
712 void *p;
713 pa_bool_t after_avail = TRUE;
714
715 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
716
717 if (u->memchunk.length <= 0)
718 pa_sink_render(u->sink, n_bytes, &u->memchunk);
719
720 pa_assert(u->memchunk.length > 0);
721
722 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
723
724 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
725 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
726
727 p = pa_memblock_acquire(u->memchunk.memblock);
728 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
729 pa_memblock_release(u->memchunk.memblock);
730
731 if (PA_UNLIKELY(frames < 0)) {
732
733 if (!after_avail && (int) frames == -EAGAIN)
734 break;
735
736 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
737 continue;
738
739 return r;
740 }
741
742 if (!after_avail && frames == 0)
743 break;
744
745 pa_assert(frames > 0);
746 after_avail = FALSE;
747
748 u->memchunk.index += (size_t) frames * u->frame_size;
749 u->memchunk.length -= (size_t) frames * u->frame_size;
750
751 if (u->memchunk.length <= 0) {
752 pa_memblock_unref(u->memchunk.memblock);
753 pa_memchunk_reset(&u->memchunk);
754 }
755
756 work_done = TRUE;
757
758 u->write_count += frames * u->frame_size;
759 u->since_start += frames * u->frame_size;
760
761 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
762
763 if ((size_t) frames * u->frame_size >= n_bytes)
764 break;
765
766 n_bytes -= (size_t) frames * u->frame_size;
767 }
768 }
769
770 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
771
772 if (*sleep_usec > process_usec)
773 *sleep_usec -= process_usec;
774 else
775 *sleep_usec = 0;
776
777 return work_done ? 1 : 0;
778 }
779
780 static void update_smoother(struct userdata *u) {
781 snd_pcm_sframes_t delay = 0;
782 int64_t position;
783 int err;
784 pa_usec_t now1 = 0, now2;
785 snd_pcm_status_t *status;
786
787 snd_pcm_status_alloca(&status);
788
789 pa_assert(u);
790 pa_assert(u->pcm_handle);
791
792 /* Let's update the time smoother */
793
794 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
795 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
796 return;
797 }
798
799 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
800 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
801 else {
802 snd_htimestamp_t htstamp = { 0, 0 };
803 snd_pcm_status_get_htstamp(status, &htstamp);
804 now1 = pa_timespec_load(&htstamp);
805 }
806
807 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
808 if (now1 <= 0)
809 now1 = pa_rtclock_now();
810
811 /* Skip the update if less than 'smoother_interval' has passed since the last one */
812 if (u->last_smoother_update > 0)
813 if (u->last_smoother_update + u->smoother_interval > now1)
814 return;
815
816 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
817
818 if (PA_UNLIKELY(position < 0))
819 position = 0;
820
821 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
822
823 pa_smoother_put(u->smoother, now1, now2);
824
825 u->last_smoother_update = now1;
826 /* exponentially increase the update interval up to the MAX limit */
827 u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
828 }
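
/* The interval thus grows 2ms, 4ms, 8ms, ... until it saturates at
 * SMOOTHER_MAX_INTERVAL (200ms), making smoother updates progressively
 * rarer once the clock estimate has settled. */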
829
830 static pa_usec_t sink_get_latency(struct userdata *u) {
831 pa_usec_t r;
832 int64_t delay;
833 pa_usec_t now1, now2;
834
835 pa_assert(u);
836
837 now1 = pa_rtclock_now();
838 now2 = pa_smoother_get(u->smoother, now1);
839
840 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
841
842 r = delay >= 0 ? (pa_usec_t) delay : 0;
843
844 if (u->memchunk.memblock)
845 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
846
847 return r;
848 }
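
/* In other words: latency = (bytes handed to ALSA so far, as time) minus
 * (smoothed playback position), plus whatever still sits in u->memchunk
 * and has not been written to the device yet. */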
849
850 static int build_pollfd(struct userdata *u) {
851 pa_assert(u);
852 pa_assert(u->pcm_handle);
853
854 if (u->alsa_rtpoll_item)
855 pa_rtpoll_item_free(u->alsa_rtpoll_item);
856
857 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
858 return -1;
859
860 return 0;
861 }
862
863 /* Called from IO context */
864 static int suspend(struct userdata *u) {
865 pa_assert(u);
866 pa_assert(u->pcm_handle);
867
868 pa_smoother_pause(u->smoother, pa_rtclock_now());
869
870 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
871 * take awfully long with our long buffer sizes today. */
872 snd_pcm_close(u->pcm_handle);
873 u->pcm_handle = NULL;
874
875 if (u->alsa_rtpoll_item) {
876 pa_rtpoll_item_free(u->alsa_rtpoll_item);
877 u->alsa_rtpoll_item = NULL;
878 }
879
880 pa_log_info("Device suspended...");
881
882 return 0;
883 }
884
885 /* Called from IO context */
886 static int update_sw_params(struct userdata *u) {
887 snd_pcm_uframes_t avail_min;
888 int err;
889
890 pa_assert(u);
891
892 /* Use the full buffer if no one asked us for anything specific */
893 u->hwbuf_unused = 0;
894
895 if (u->use_tsched) {
896 pa_usec_t latency;
897
898 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
899 size_t b;
900
901 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
902
903 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
904
905 /* We need at least one sample in our buffer */
906
907 if (PA_UNLIKELY(b < u->frame_size))
908 b = u->frame_size;
909
910 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
911 }
912
913 fix_min_sleep_wakeup(u);
914 fix_tsched_watermark(u);
915 }
916
917 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
918
919 /* We need at least one frame in the used part of the buffer */
920 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
921
922 if (u->use_tsched) {
923 pa_usec_t sleep_usec, process_usec;
924
925 hw_sleep_time(u, &sleep_usec, &process_usec);
926 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
927 }
928
929 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
930
931 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
932 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
933 return err;
934 }
935
936 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
937
938 return 0;
939 }
940
941 /* Called from IO context */
942 static int unsuspend(struct userdata *u) {
943 pa_sample_spec ss;
944 int err;
945 pa_bool_t b, d;
946 unsigned nfrags;
947 snd_pcm_uframes_t period_size;
948
949 pa_assert(u);
950 pa_assert(!u->pcm_handle);
951
952 pa_log_info("Trying resume...");
953
954 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
955 /*SND_PCM_NONBLOCK|*/
956 SND_PCM_NO_AUTO_RESAMPLE|
957 SND_PCM_NO_AUTO_CHANNELS|
958 SND_PCM_NO_AUTO_FORMAT)) < 0) {
959 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
960 goto fail;
961 }
962
963 ss = u->sink->sample_spec;
964 nfrags = u->nfragments;
965 period_size = u->fragment_size / u->frame_size;
966 b = u->use_mmap;
967 d = u->use_tsched;
968
969 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
970 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
971 goto fail;
972 }
973
974 if (b != u->use_mmap || d != u->use_tsched) {
975 pa_log_warn("Resume failed, couldn't get original access mode.");
976 goto fail;
977 }
978
979 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
980 pa_log_warn("Resume failed, couldn't restore original sample settings.");
981 goto fail;
982 }
983
984 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
985 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
986 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
987 (unsigned long) nfrags, period_size * u->frame_size);
988 goto fail;
989 }
990
991 if (update_sw_params(u) < 0)
992 goto fail;
993
994 if (build_pollfd(u) < 0)
995 goto fail;
996
997 u->write_count = 0;
998 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
999 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1000 u->last_smoother_update = 0;
1001
1002 u->first = TRUE;
1003 u->since_start = 0;
1004
1005 pa_log_info("Resumed successfully...");
1006
1007 return 0;
1008
1009 fail:
1010 if (u->pcm_handle) {
1011 snd_pcm_close(u->pcm_handle);
1012 u->pcm_handle = NULL;
1013 }
1014
1015 return -1;
1016 }
1017
1018 /* Called from IO context */
1019 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1020 struct userdata *u = PA_SINK(o)->userdata;
1021
1022 switch (code) {
1023
1024 case PA_SINK_MESSAGE_GET_LATENCY: {
1025 pa_usec_t r = 0;
1026
1027 if (u->pcm_handle)
1028 r = sink_get_latency(u);
1029
1030 *((pa_usec_t*) data) = r;
1031
1032 return 0;
1033 }
1034
1035 case PA_SINK_MESSAGE_SET_STATE:
1036
1037 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
1038
1039 case PA_SINK_SUSPENDED:
1040 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
1041
1042 if (suspend(u) < 0)
1043 return -1;
1044
1045 break;
1046
1047 case PA_SINK_IDLE:
1048 case PA_SINK_RUNNING:
1049
1050 if (u->sink->thread_info.state == PA_SINK_INIT) {
1051 if (build_pollfd(u) < 0)
1052 return -1;
1053 }
1054
1055 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
1056 if (unsuspend(u) < 0)
1057 return -1;
1058 }
1059
1060 break;
1061
1062 case PA_SINK_UNLINKED:
1063 case PA_SINK_INIT:
1064 case PA_SINK_INVALID_STATE:
1065 ;
1066 }
1067
1068 break;
1069 }
1070
1071 return pa_sink_process_msg(o, code, data, offset, chunk);
1072 }
1073
1074 /* Called from main context */
1075 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1076 pa_sink_state_t old_state;
1077 struct userdata *u;
1078
1079 pa_sink_assert_ref(s);
1080 pa_assert_se(u = s->userdata);
1081
1082 old_state = pa_sink_get_state(u->sink);
1083
1084 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1085 reserve_done(u);
1086 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1087 if (reserve_init(u, u->device_name) < 0)
1088 return -1;
1089
1090 return 0;
1091 }
1092
1093 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1094 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1095
1096 pa_assert(u);
1097 pa_assert(u->mixer_handle);
1098
1099 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1100 return 0;
1101
1102 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1103 pa_sink_get_volume(u->sink, TRUE);
1104 pa_sink_get_mute(u->sink, TRUE);
1105 }
1106
1107 return 0;
1108 }
1109
1110 static void sink_get_volume_cb(pa_sink *s) {
1111 struct userdata *u = s->userdata;
1112 pa_cvolume r;
1113 char t[PA_CVOLUME_SNPRINT_MAX];
1114
1115 pa_assert(u);
1116 pa_assert(u->mixer_path);
1117 pa_assert(u->mixer_handle);
1118
1119 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1120 return;
1121
1122 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1123 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1124
1125 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1126
1127 if (pa_cvolume_equal(&u->hardware_volume, &r))
1128 return;
1129
1130 s->real_volume = u->hardware_volume = r;
1131
1132 /* Hmm, so the hardware volume changed, let's reset our software volume */
1133 if (u->mixer_path->has_dB)
1134 pa_sink_set_soft_volume(s, NULL);
1135 }
1136
1137 static void sink_set_volume_cb(pa_sink *s) {
1138 struct userdata *u = s->userdata;
1139 pa_cvolume r;
1140 char t[PA_CVOLUME_SNPRINT_MAX];
1141
1142 pa_assert(u);
1143 pa_assert(u->mixer_path);
1144 pa_assert(u->mixer_handle);
1145
1146 /* Shift up by the base volume */
1147 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1148
1149 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1150 return;
1151
1152 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1153 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1154
1155 u->hardware_volume = r;
1156
1157 if (u->mixer_path->has_dB) {
1158 pa_cvolume new_soft_volume;
1159 pa_bool_t accurate_enough;
1160
1161 /* Match exactly what the user requested by software */
1162 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1163
1164 /* If the adjustment to do in software is only minimal we
1165 * can skip it. That saves us CPU at the expense of a bit of
1166 * accuracy */
1167 accurate_enough =
1168 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1169 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1170
1171 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1172 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1173 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1174 pa_yes_no(accurate_enough));
1175
1176 if (!accurate_enough)
1177 s->soft_volume = new_soft_volume;
1178
1179 } else {
1180 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1181
1182 /* We can't match exactly what the user requested, hence let's
1183 * at least tell the user about it */
1184
1185 s->real_volume = r;
1186 }
1187 }
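
/* Example of the accuracy check above: if the hardware can only
 * approximate the requested volume and the residual software factor ends
 * up within +/- 1% of PA_VOLUME_NORM (VOLUME_ACCURACY), we skip software
 * scaling entirely, trading a tiny inaccuracy for untouched samples. */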
1188
1189 static void sink_get_mute_cb(pa_sink *s) {
1190 struct userdata *u = s->userdata;
1191 pa_bool_t b;
1192
1193 pa_assert(u);
1194 pa_assert(u->mixer_path);
1195 pa_assert(u->mixer_handle);
1196
1197 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1198 return;
1199
1200 s->muted = b;
1201 }
1202
1203 static void sink_set_mute_cb(pa_sink *s) {
1204 struct userdata *u = s->userdata;
1205
1206 pa_assert(u);
1207 pa_assert(u->mixer_path);
1208 pa_assert(u->mixer_handle);
1209
1210 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1211 }
1212
1213 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1214 struct userdata *u = s->userdata;
1215 pa_alsa_port_data *data;
1216
1217 pa_assert(u);
1218 pa_assert(p);
1219 pa_assert(u->mixer_handle);
1220
1221 data = PA_DEVICE_PORT_DATA(p);
1222
1223 pa_assert_se(u->mixer_path = data->path);
1224 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1225
1226 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1227 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1228 s->n_volume_steps = PA_VOLUME_NORM+1;
1229
1230 if (u->mixer_path->max_dB > 0.0)
1231 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1232 else
1233 pa_log_info("No particular base volume set, fixing to 0 dB");
1234 } else {
1235 s->base_volume = PA_VOLUME_NORM;
1236 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1237 }
1238
1239 if (data->setting)
1240 pa_alsa_setting_select(data->setting, u->mixer_handle);
1241
1242 if (s->set_mute)
1243 s->set_mute(s);
1244 if (s->set_volume)
1245 s->set_volume(s);
1246
1247 return 0;
1248 }
1249
1250 static void sink_update_requested_latency_cb(pa_sink *s) {
1251 struct userdata *u = s->userdata;
1252 size_t before;
1253 pa_assert(u);
1254
1255 if (!u->pcm_handle)
1256 return;
1257
1258 before = u->hwbuf_unused;
1259 update_sw_params(u);
1260
1261 /* Let's check whether we now use only a smaller part of the
1262 buffer than before. If so, we need to make sure that subsequent
1263 rewinds are relative to the new maximum fill level and not to the
1264 current fill level. Thus, let's do a full rewind once, to clear
1265 things up. */
1266
1267 if (u->hwbuf_unused > before) {
1268 pa_log_debug("Requesting rewind due to latency change.");
1269 pa_sink_request_rewind(s, (size_t) -1);
1270 }
1271 }
1272
1273 static int process_rewind(struct userdata *u) {
1274 snd_pcm_sframes_t unused;
1275 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1276 pa_assert(u);
1277
1278 /* Figure out how much we shall rewind and reset the counter */
1279 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1280
1281 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1282
1283 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1284 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1285 return -1;
1286 }
1287
1288 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1289
1290 if (u->hwbuf_size > unused_nbytes)
1291 limit_nbytes = u->hwbuf_size - unused_nbytes;
1292 else
1293 limit_nbytes = 0;
1294
1295 if (rewind_nbytes > limit_nbytes)
1296 rewind_nbytes = limit_nbytes;
1297
1298 if (rewind_nbytes > 0) {
1299 snd_pcm_sframes_t in_frames, out_frames;
1300
1301 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1302
1303 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1304 pa_log_debug("before: %lu", (unsigned long) in_frames);
1305 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1306 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1307 if (try_recover(u, "process_rewind", out_frames) < 0)
1308 return -1;
1309 out_frames = 0;
1310 }
1311
1312 pa_log_debug("after: %lu", (unsigned long) out_frames);
1313
1314 rewind_nbytes = (size_t) out_frames * u->frame_size;
1315
1316 if (rewind_nbytes <= 0)
1317 pa_log_info("Tried rewind, but was apparently not possible.");
1318 else {
1319 u->write_count -= rewind_nbytes;
1320 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1321 pa_sink_process_rewind(u->sink, rewind_nbytes);
1322
1323 u->after_rewind = TRUE;
1324 return 0;
1325 }
1326 } else
1327 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1328
1329 pa_sink_process_rewind(u->sink, 0);
1330 return 0;
1331 }
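
/* In effect we may rewind at most what is currently queued in the
 * hardware buffer minus the watermark, so playback can continue
 * undisturbed while the rewound region is rewritten. */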
1332
1333 static void thread_func(void *userdata) {
1334 struct userdata *u = userdata;
1335 unsigned short revents = 0;
1336
1337 pa_assert(u);
1338
1339 pa_log_debug("Thread starting up");
1340
1341 if (u->core->realtime_scheduling)
1342 pa_make_realtime(u->core->realtime_priority);
1343
1344 pa_thread_mq_install(&u->thread_mq);
1345
1346 for (;;) {
1347 int ret;
1348
1349 #ifdef DEBUG_TIMING
1350 pa_log_debug("Loop");
1351 #endif
1352
1353 /* Render some data and write it to the dsp */
1354 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1355 int work_done;
1356 pa_usec_t sleep_usec = 0;
1357 pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1358
1359 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1360 if (process_rewind(u) < 0)
1361 goto fail;
1362
1363 if (u->use_mmap)
1364 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1365 else
1366 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1367
1368 if (work_done < 0)
1369 goto fail;
1370
1371 /* pa_log_debug("work_done = %i", work_done); */
1372
1373 if (work_done) {
1374
1375 if (u->first) {
1376 pa_log_info("Starting playback.");
1377 snd_pcm_start(u->pcm_handle);
1378
1379 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1380 }
1381
1382 update_smoother(u);
1383 }
1384
1385 if (u->use_tsched) {
1386 pa_usec_t cusec;
1387
1388 if (u->since_start <= u->hwbuf_size) {
1389
1390 /* USB devices on ALSA seem to hit a buffer
1391 * underrun during the first iterations much
1392 * quicker than we calculate here, probably due to
1393 * the transport latency. To compensate for that
1394 * we artificially decrease the sleep time until
1395 * we have filled the buffer at least once
1396 * completely. */
1397
1398 if (pa_log_ratelimit())
1399 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1400 sleep_usec /= 2;
1401 }
1402
1403 /* OK, the playback buffer is now full, let's
1404 * calculate when to wake up next */
1405 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1406
1407 /* Convert from the sound card time domain to the
1408 * system time domain */
1409 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1410
1411 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1412
1413 /* We don't trust the conversion, so we wake up on whichever deadline comes first */
1414 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1415 }
1416
1417 u->first = FALSE;
1418 u->after_rewind = FALSE;
1419
1420 } else if (u->use_tsched)
1421
1422 /* OK, we're in an invalid state, let's disable our timers */
1423 pa_rtpoll_set_timer_disabled(u->rtpoll);
1424
1425 /* Hmm, nothing to do. Let's sleep */
1426 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1427 goto fail;
1428
1429 if (ret == 0)
1430 goto finish;
1431
1432 /* Tell ALSA about this and process its response */
1433 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1434 struct pollfd *pollfd;
1435 int err;
1436 unsigned n;
1437
1438 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1439
1440 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1441 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1442 goto fail;
1443 }
1444
1445 if (revents & ~POLLOUT) {
1446 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1447 goto fail;
1448
1449 u->first = TRUE;
1450 u->since_start = 0;
1451 } else if (revents && u->use_tsched && pa_log_ratelimit())
1452 pa_log_debug("Wakeup from ALSA!");
1453
1454 } else
1455 revents = 0;
1456 }
1457
1458 fail:
1459 /* If this wasn't a regular exit from the loop we have to continue
1460 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1461 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1462 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1463
1464 finish:
1465 pa_log_debug("Thread shutting down");
1466 }
1467
1468 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1469 const char *n;
1470 char *t;
1471
1472 pa_assert(data);
1473 pa_assert(ma);
1474 pa_assert(device_name);
1475
1476 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1477 pa_sink_new_data_set_name(data, n);
1478 data->namereg_fail = TRUE;
1479 return;
1480 }
1481
1482 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1483 data->namereg_fail = TRUE;
1484 else {
1485 n = device_id ? device_id : device_name;
1486 data->namereg_fail = FALSE;
1487 }
1488
1489 if (mapping)
1490 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1491 else
1492 t = pa_sprintf_malloc("alsa_output.%s", n);
1493
1494 pa_sink_new_data_set_name(data, t);
1495 pa_xfree(t);
1496 }
1497
1498 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1499
1500 if (!mapping && !element)
1501 return;
1502
1503 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1504 pa_log_info("Failed to find a working mixer device.");
1505 return;
1506 }
1507
1508 if (element) {
1509
1510 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1511 goto fail;
1512
1513 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1514 goto fail;
1515
1516 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1517 pa_alsa_path_dump(u->mixer_path);
1518 } else {
1519
1520 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1521 goto fail;
1522
1523 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1524
1525 pa_log_debug("Probed mixer paths:");
1526 pa_alsa_path_set_dump(u->mixer_path_set);
1527 }
1528
1529 return;
1530
1531 fail:
1532
1533 if (u->mixer_path_set) {
1534 pa_alsa_path_set_free(u->mixer_path_set);
1535 u->mixer_path_set = NULL;
1536 } else if (u->mixer_path) {
1537 pa_alsa_path_free(u->mixer_path);
1538 u->mixer_path = NULL;
1539 }
1540
1541 if (u->mixer_handle) {
1542 snd_mixer_close(u->mixer_handle);
1543 u->mixer_handle = NULL;
1544 }
1545 }
1546
1547 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1548 pa_assert(u);
1549
1550 if (!u->mixer_handle)
1551 return 0;
1552
1553 if (u->sink->active_port) {
1554 pa_alsa_port_data *data;
1555
1556 /* We have a list of supported paths, so let's activate the
1557 * one that has been chosen as active */
1558
1559 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1560 u->mixer_path = data->path;
1561
1562 pa_alsa_path_select(data->path, u->mixer_handle);
1563
1564 if (data->setting)
1565 pa_alsa_setting_select(data->setting, u->mixer_handle);
1566
1567 } else {
1568
1569 if (!u->mixer_path && u->mixer_path_set)
1570 u->mixer_path = u->mixer_path_set->paths;
1571
1572 if (u->mixer_path) {
1573 /* Hmm, we have only a single path, so let's activate it */
1574
1575 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1576
1577 if (u->mixer_path->settings)
1578 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1579 } else
1580 return 0;
1581 }
1582
1583 if (!u->mixer_path->has_volume)
1584 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1585 else {
1586
1587 if (u->mixer_path->has_dB) {
1588 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1589
1590 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1591 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1592
1593 if (u->mixer_path->max_dB > 0.0)
1594 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1595 else
1596 pa_log_info("No particular base volume set, fixing to 0 dB");
1597
1598 } else {
1599 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1600 u->sink->base_volume = PA_VOLUME_NORM;
1601 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1602 }
1603
1604 u->sink->get_volume = sink_get_volume_cb;
1605 u->sink->set_volume = sink_set_volume_cb;
1606
1607 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1608 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1609 }
1610
1611 if (!u->mixer_path->has_mute) {
1612 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1613 } else {
1614 u->sink->get_mute = sink_get_mute_cb;
1615 u->sink->set_mute = sink_set_mute_cb;
1616 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1617 pa_log_info("Using hardware mute control.");
1618 }
1619
1620 u->mixer_fdl = pa_alsa_fdlist_new();
1621
1622 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1623 pa_log("Failed to initialize file descriptor monitoring");
1624 return -1;
1625 }
1626
1627 if (u->mixer_path_set)
1628 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1629 else
1630 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1631
1632 return 0;
1633 }
1634
1635 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1636
1637 struct userdata *u = NULL;
1638 const char *dev_id = NULL;
1639 pa_sample_spec ss, requested_ss;
1640 pa_channel_map map;
1641 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1642 snd_pcm_uframes_t period_frames, tsched_frames;
1643 size_t frame_size;
1644 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1645 pa_sink_new_data data;
1646 pa_alsa_profile_set *profile_set = NULL;
1647
1648 pa_assert(m);
1649 pa_assert(ma);
1650
1651 ss = m->core->default_sample_spec;
1652 map = m->core->default_channel_map;
1653 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1654 pa_log("Failed to parse sample specification and channel map");
1655 goto fail;
1656 }
1657
1658 requested_ss = ss;
1659 frame_size = pa_frame_size(&ss);
1660
1661 nfrags = m->core->default_n_fragments;
1662 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1663 if (frag_size <= 0)
1664 frag_size = (uint32_t) frame_size;
1665 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1666 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1667
1668 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1669 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1670 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1671 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1672 pa_log("Failed to parse buffer metrics");
1673 goto fail;
1674 }
1675
1676 hwbuf_size = frag_size * nfrags;
1677 period_frames = frag_size/frame_size;
1678 tsched_frames = tsched_size/frame_size;
1679
1680 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1681 pa_log("Failed to parse mmap argument.");
1682 goto fail;
1683 }
1684
1685 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1686 pa_log("Failed to parse tsched argument.");
1687 goto fail;
1688 }
1689
1690 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1691 pa_log("Failed to parse ignore_dB argument.");
1692 goto fail;
1693 }
1694
1695 if (use_tsched && !pa_rtclock_hrtimer()) {
1696 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1697 use_tsched = FALSE;
1698 }
1699
1700 u = pa_xnew0(struct userdata, 1);
1701 u->core = m->core;
1702 u->module = m;
1703 u->use_mmap = use_mmap;
1704 u->use_tsched = use_tsched;
1705 u->first = TRUE;
1706 u->rtpoll = pa_rtpoll_new();
1707 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1708
1709 u->smoother = pa_smoother_new(
1710 DEFAULT_TSCHED_BUFFER_USEC*2,
1711 DEFAULT_TSCHED_BUFFER_USEC*2,
1712 TRUE,
1713 TRUE,
1714 5,
1715 pa_rtclock_now(),
1716 TRUE);
1717 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1718
1719 dev_id = pa_modargs_get_value(
1720 ma, "device_id",
1721 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1722
1723 if (reserve_init(u, dev_id) < 0)
1724 goto fail;
1725
1726 if (reserve_monitor_init(u, dev_id) < 0)
1727 goto fail;
1728
1729 b = use_mmap;
1730 d = use_tsched;
1731
1732 if (mapping) {
1733
1734 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1735 pa_log("device_id= not set");
1736 goto fail;
1737 }
1738
1739 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1740 dev_id,
1741 &u->device_name,
1742 &ss, &map,
1743 SND_PCM_STREAM_PLAYBACK,
1744 &nfrags, &period_frames, tsched_frames,
1745 &b, &d, mapping)))
1746
1747 goto fail;
1748
1749 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1750
1751 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1752 goto fail;
1753
1754 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1755 dev_id,
1756 &u->device_name,
1757 &ss, &map,
1758 SND_PCM_STREAM_PLAYBACK,
1759 &nfrags, &period_frames, tsched_frames,
1760 &b, &d, profile_set, &mapping)))
1761
1762 goto fail;
1763
1764 } else {
1765
1766 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1767 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1768 &u->device_name,
1769 &ss, &map,
1770 SND_PCM_STREAM_PLAYBACK,
1771 &nfrags, &period_frames, tsched_frames,
1772 &b, &d, FALSE)))
1773 goto fail;
1774 }
1775
1776 pa_assert(u->device_name);
1777 pa_log_info("Successfully opened device %s.", u->device_name);
1778
1779 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1780 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1781 goto fail;
1782 }
1783
1784 if (mapping)
1785 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1786
1787 if (use_mmap && !b) {
1788 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1789 u->use_mmap = use_mmap = FALSE;
1790 }
1791
1792 if (use_tsched && (!b || !d)) {
1793 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1794 u->use_tsched = use_tsched = FALSE;
1795 }
1796
1797 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1798 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1799 u->use_tsched = use_tsched = FALSE;
1800 }
1801
1802 if (u->use_mmap)
1803 pa_log_info("Successfully enabled mmap() mode.");
1804
1805 if (u->use_tsched)
1806 pa_log_info("Successfully enabled timer-based scheduling mode.");
1807
1808 /* ALSA might tweak the sample spec, so recalculate the frame size */
1809 frame_size = pa_frame_size(&ss);
1810
1811 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1812
1813 pa_sink_new_data_init(&data);
1814 data.driver = driver;
1815 data.module = m;
1816 data.card = card;
1817 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1818 pa_sink_new_data_set_sample_spec(&data, &ss);
1819 pa_sink_new_data_set_channel_map(&data, &map);
1820
1821 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1822 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1823 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1824 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1825 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1826
1827 if (mapping) {
1828 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1829 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1830 }
1831
1832 pa_alsa_init_description(data.proplist);
1833
1834 if (u->control_device)
1835 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1836
1837 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1838 pa_log("Invalid properties");
1839 pa_sink_new_data_done(&data);
1840 goto fail;
1841 }
1842
1843 if (u->mixer_path_set)
1844 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1845
1846 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1847 pa_sink_new_data_done(&data);
1848
1849 if (!u->sink) {
1850 pa_log("Failed to create sink object");
1851 goto fail;
1852 }
1853
1854 u->sink->parent.process_msg = sink_process_msg;
1855 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1856 u->sink->set_state = sink_set_state_cb;
1857 u->sink->set_port = sink_set_port_cb;
1858 u->sink->userdata = u;
1859
1860 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1861 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1862
1863 u->frame_size = frame_size;
1864 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1865 u->nfragments = nfrags;
1866 u->hwbuf_size = u->fragment_size * nfrags;
1867 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1868
1869 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1870 nfrags, (long unsigned) u->fragment_size,
1871 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1872
1873 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1874 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1875
1876 if (u->use_tsched) {
1877 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1878
1879 u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
1880 u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
1881
1882 u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
1883 u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
1884
1885 fix_min_sleep_wakeup(u);
1886 fix_tsched_watermark(u);
1887
1888 pa_sink_set_latency_range(u->sink,
1889 0,
1890 pa_bytes_to_usec(u->hwbuf_size, &ss));
1891
1892 pa_log_info("Time scheduling watermark is %0.2fms",
1893 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1894 } else
1895 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1896
1897
1898 reserve_update(u);
1899
1900 if (update_sw_params(u) < 0)
1901 goto fail;
1902
1903 if (setup_mixer(u, ignore_dB) < 0)
1904 goto fail;
1905
1906 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1907
1908 if (!(u->thread = pa_thread_new(thread_func, u))) {
1909 pa_log("Failed to create thread.");
1910 goto fail;
1911 }
1912
1913 /* Get initial mixer settings */
1914 if (data.volume_is_set) {
1915 if (u->sink->set_volume)
1916 u->sink->set_volume(u->sink);
1917 } else {
1918 if (u->sink->get_volume)
1919 u->sink->get_volume(u->sink);
1920 }
1921
1922 if (data.muted_is_set) {
1923 if (u->sink->set_mute)
1924 u->sink->set_mute(u->sink);
1925 } else {
1926 if (u->sink->get_mute)
1927 u->sink->get_mute(u->sink);
1928 }
1929
1930 pa_sink_put(u->sink);
1931
1932 if (profile_set)
1933 pa_alsa_profile_set_free(profile_set);
1934
1935 return u->sink;
1936
1937 fail:
1938
1939 if (u)
1940 userdata_free(u);
1941
1942 if (profile_set)
1943 pa_alsa_profile_set_free(profile_set);
1944
1945 return NULL;
1946 }
1947
1948 static void userdata_free(struct userdata *u) {
1949 pa_assert(u);
1950
1951 if (u->sink)
1952 pa_sink_unlink(u->sink);
1953
1954 if (u->thread) {
1955 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1956 pa_thread_free(u->thread);
1957 }
1958
1959 pa_thread_mq_done(&u->thread_mq);
1960
1961 if (u->sink)
1962 pa_sink_unref(u->sink);
1963
1964 if (u->memchunk.memblock)
1965 pa_memblock_unref(u->memchunk.memblock);
1966
1967 if (u->alsa_rtpoll_item)
1968 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1969
1970 if (u->rtpoll)
1971 pa_rtpoll_free(u->rtpoll);
1972
1973 if (u->pcm_handle) {
1974 snd_pcm_drop(u->pcm_handle);
1975 snd_pcm_close(u->pcm_handle);
1976 }
1977
1978 if (u->mixer_fdl)
1979 pa_alsa_fdlist_free(u->mixer_fdl);
1980
1981 if (u->mixer_path_set)
1982 pa_alsa_path_set_free(u->mixer_path_set);
1983 else if (u->mixer_path)
1984 pa_alsa_path_free(u->mixer_path);
1985
1986 if (u->mixer_handle)
1987 snd_mixer_close(u->mixer_handle);
1988
1989 if (u->smoother)
1990 pa_smoother_free(u->smoother);
1991
1992 reserve_done(u);
1993 monitor_done(u);
1994
1995 pa_xfree(u->device_name);
1996 pa_xfree(u->control_device);
1997 pa_xfree(u);
1998 }
1999
2000 void pa_alsa_sink_free(pa_sink *s) {
2001 struct userdata *u;
2002
2003 pa_sink_assert_ref(s);
2004 pa_assert_se(u = s->userdata);
2005
2006 userdata_free(u);
2007 }
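
/* For reference, a caller such as module-alsa-sink would drive this API
 * roughly as follows (a minimal sketch, not part of this file; the
 * 'valid_modargs' table and error handling are omitted):
 *
 *     pa_modargs *ma = pa_modargs_new(m->argument, valid_modargs);
 *     pa_sink *s = pa_alsa_sink_new(m, ma, __FILE__, NULL, NULL);
 *     ...
 *     pa_alsa_sink_free(s);
 *     pa_modargs_free(ma);
 */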