]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
b99ed78235ada3b697dcfe498f4584c90245712c
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
71 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update interval */
73
74 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
75
76 struct userdata {
77 pa_core *core;
78 pa_module *module;
79 pa_sink *sink;
80
81 pa_thread *thread;
82 pa_thread_mq thread_mq;
83 pa_rtpoll *rtpoll;
84
85 snd_pcm_t *pcm_handle;
86
87 pa_alsa_fdlist *mixer_fdl;
88 snd_mixer_t *mixer_handle;
89 pa_alsa_path_set *mixer_path_set;
90 pa_alsa_path *mixer_path;
91
92 pa_cvolume hardware_volume;
93
94 size_t
95 frame_size,
96 fragment_size,
97 hwbuf_size,
98 tsched_watermark,
99 hwbuf_unused,
100 min_sleep,
101 min_wakeup,
102 watermark_step;
103
104 unsigned nfragments;
105 pa_memchunk memchunk;
106
107 char *device_name; /* name of the PCM device */
108 char *control_device; /* name of the control device */
109
110 pa_bool_t use_mmap:1, use_tsched:1;
111
112 pa_bool_t first, after_rewind;
113
114 pa_rtpoll_item *alsa_rtpoll_item;
115
116 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
117
118 pa_smoother *smoother;
119 uint64_t write_count;
120 uint64_t since_start;
121 pa_usec_t smoother_interval;
122 pa_usec_t last_smoother_update;
123
124 pa_reserve_wrapper *reserve;
125 pa_hook_slot *reserve_slot;
126 pa_reserve_monitor_wrapper *monitor;
127 pa_hook_slot *monitor_slot;
128 };
129
130 static void userdata_free(struct userdata *u);
131
132 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
133 pa_assert(r);
134 pa_assert(u);
135
136 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
137 return PA_HOOK_CANCEL;
138
139 return PA_HOOK_OK;
140 }
141
142 static void reserve_done(struct userdata *u) {
143 pa_assert(u);
144
145 if (u->reserve_slot) {
146 pa_hook_slot_free(u->reserve_slot);
147 u->reserve_slot = NULL;
148 }
149
150 if (u->reserve) {
151 pa_reserve_wrapper_unref(u->reserve);
152 u->reserve = NULL;
153 }
154 }
155
156 static void reserve_update(struct userdata *u) {
157 const char *description;
158 pa_assert(u);
159
160 if (!u->sink || !u->reserve)
161 return;
162
163 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
164 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
165 }
166
/* Try to take the device reservation for the ALSA device 'dname'.
 *
 * Returns 0 when the reservation was acquired or is not applicable
 * (already held, system mode, or no reservation name for this device);
 * returns -1 only when acquiring the reservation failed. */
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    /* Device reservation is a per-user-session concept; skip it when
     * running in system mode */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    /* Get notified when somebody else wants the device */
    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
196
197 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
198 pa_bool_t b;
199
200 pa_assert(w);
201 pa_assert(u);
202
203 b = PA_PTR_TO_UINT(busy) && !u->reserve;
204
205 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
206 return PA_HOOK_OK;
207 }
208
209 static void monitor_done(struct userdata *u) {
210 pa_assert(u);
211
212 if (u->monitor_slot) {
213 pa_hook_slot_free(u->monitor_slot);
214 u->monitor_slot = NULL;
215 }
216
217 if (u->monitor) {
218 pa_reserve_monitor_wrapper_unref(u->monitor);
219 u->monitor = NULL;
220 }
221 }
222
/* Set up a monitor that watches whether some other application holds
 * the reservation on device 'dname'.
 *
 * Returns 0 when the monitor was created or is not applicable (system
 * mode, or no reservation name for this device), -1 on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    /* Device reservation is a per-user-session concept; skip it when
     * running in system mode */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    /* Get notified when the busy state of the device changes */
    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
246
/* Recompute u->min_sleep and u->min_wakeup (both in bytes) from the
 * TSCHED_MIN_* constants, clamping each to the range
 * [one frame, half of the usable hw buffer]. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);

    /* The usable part of the hw buffer, i.e. everything except the part
     * deliberately kept unused to honour the requested latency */
    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
261
/* Clamp the timer-scheduling watermark so that it leaves at least
 * min_sleep of headroom inside the usable buffer and never drops below
 * min_wakeup. The lower bound is applied last, so it wins if the two
 * constraints ever conflict. */
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);

    /* Usable part of the hw buffer (see fix_min_sleep_wakeup()) */
    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
274
/* React to a playback underrun: first try to grow the wakeup watermark;
 * if it is already maxed out, raise the sink's minimal latency instead.
 * Only called in timer-based scheduling mode. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* Watermark and minimal latency are both already at their maximum;
     * there is nothing further we can adjust. */
}
308
/* Split the time until the hw buffer would run empty into a sleep phase
 * and a processing phase, based on the requested latency and the current
 * watermark.
 *
 * *sleep_usec: how long the IO thread may sleep before refilling.
 * *process_usec: budget reserved for actually filling the buffer (the
 * watermark, capped to half the total buffer time). */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* Nobody requested a specific latency: use the whole hw buffer */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    /* Never let the watermark consume more than half of the total time */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
337
338 static int try_recover(struct userdata *u, const char *call, int err) {
339 pa_assert(u);
340 pa_assert(call);
341 pa_assert(err < 0);
342
343 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
344
345 pa_assert(err != -EAGAIN);
346
347 if (err == -EPIPE)
348 pa_log_debug("%s: Buffer underrun!", call);
349
350 if (err == -ESTRPIPE)
351 pa_log_debug("%s: System suspended!", call);
352
353 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
354 pa_log("%s: %s", call, pa_alsa_strerror(err));
355 return -1;
356 }
357
358 u->first = TRUE;
359 u->since_start = 0;
360 return 0;
361 }
362
/* Given how many bytes ALSA reports as writable (n_bytes), return how
 * many bytes are still queued for playback. If more appears writable
 * than the buffer holds we underran: log it and, in tsched mode, let
 * adjust_after_underrun() grow the watermark/latency. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after start-up or a rewind an apparent underrun is
         * expected, so only complain (and adjust) otherwise */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
397
398 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
399 pa_bool_t work_done = TRUE;
400 pa_usec_t max_sleep_usec = 0, process_usec = 0;
401 size_t left_to_play;
402 unsigned j = 0;
403
404 pa_assert(u);
405 pa_sink_assert_ref(u->sink);
406
407 if (u->use_tsched)
408 hw_sleep_time(u, &max_sleep_usec, &process_usec);
409
410 for (;;) {
411 snd_pcm_sframes_t n;
412 size_t n_bytes;
413 int r;
414 pa_bool_t after_avail = TRUE;
415
416 /* First we determine how many samples are missing to fill the
417 * buffer up to 100% */
418
419 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
420
421 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
422 continue;
423
424 return r;
425 }
426
427 n_bytes = (size_t) n * u->frame_size;
428
429 #ifdef DEBUG_TIMING
430 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
431 #endif
432
433 left_to_play = check_left_to_play(u, n_bytes);
434
435 if (u->use_tsched)
436
437 /* We won't fill up the playback buffer before at least
438 * half the sleep time is over because otherwise we might
439 * ask for more data from the clients then they expect. We
440 * need to guarantee that clients only have to keep around
441 * a single hw buffer length. */
442
443 if (!polled &&
444 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
445 #ifdef DEBUG_TIMING
446 pa_log_debug("Not filling up, because too early.");
447 #endif
448 break;
449 }
450
451 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
452
453 if (polled)
454 PA_ONCE_BEGIN {
455 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
456 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
457 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
458 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
459 pa_strnull(dn));
460 pa_xfree(dn);
461 } PA_ONCE_END;
462
463 #ifdef DEBUG_TIMING
464 pa_log_debug("Not filling up, because not necessary.");
465 #endif
466 break;
467 }
468
469
470 if (++j > 10) {
471 #ifdef DEBUG_TIMING
472 pa_log_debug("Not filling up, because already too many iterations.");
473 #endif
474
475 break;
476 }
477
478 n_bytes -= u->hwbuf_unused;
479 polled = FALSE;
480
481 #ifdef DEBUG_TIMING
482 pa_log_debug("Filling up");
483 #endif
484
485 for (;;) {
486 pa_memchunk chunk;
487 void *p;
488 int err;
489 const snd_pcm_channel_area_t *areas;
490 snd_pcm_uframes_t offset, frames;
491 snd_pcm_sframes_t sframes;
492
493 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
494 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
495
496 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
497
498 if (!after_avail && err == -EAGAIN)
499 break;
500
501 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
502 continue;
503
504 return r;
505 }
506
507 /* Make sure that if these memblocks need to be copied they will fit into one slot */
508 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
509 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
510
511 if (!after_avail && frames == 0)
512 break;
513
514 pa_assert(frames > 0);
515 after_avail = FALSE;
516
517 /* Check these are multiples of 8 bit */
518 pa_assert((areas[0].first & 7) == 0);
519 pa_assert((areas[0].step & 7)== 0);
520
521 /* We assume a single interleaved memory buffer */
522 pa_assert((areas[0].first >> 3) == 0);
523 pa_assert((areas[0].step >> 3) == u->frame_size);
524
525 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
526
527 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
528 chunk.length = pa_memblock_get_length(chunk.memblock);
529 chunk.index = 0;
530
531 pa_sink_render_into_full(u->sink, &chunk);
532 pa_memblock_unref_fixed(chunk.memblock);
533
534 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
535
536 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
537 continue;
538
539 return r;
540 }
541
542 work_done = TRUE;
543
544 u->write_count += frames * u->frame_size;
545 u->since_start += frames * u->frame_size;
546
547 #ifdef DEBUG_TIMING
548 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
549 #endif
550
551 if ((size_t) frames * u->frame_size >= n_bytes)
552 break;
553
554 n_bytes -= (size_t) frames * u->frame_size;
555 }
556 }
557
558 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
559
560 if (*sleep_usec > process_usec)
561 *sleep_usec -= process_usec;
562 else
563 *sleep_usec = 0;
564
565 return work_done ? 1 : 0;
566 }
567
568 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
569 pa_bool_t work_done = FALSE;
570 pa_usec_t max_sleep_usec = 0, process_usec = 0;
571 size_t left_to_play;
572 unsigned j = 0;
573
574 pa_assert(u);
575 pa_sink_assert_ref(u->sink);
576
577 if (u->use_tsched)
578 hw_sleep_time(u, &max_sleep_usec, &process_usec);
579
580 for (;;) {
581 snd_pcm_sframes_t n;
582 size_t n_bytes;
583 int r;
584
585 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
586
587 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
588 continue;
589
590 return r;
591 }
592
593 n_bytes = (size_t) n * u->frame_size;
594 left_to_play = check_left_to_play(u, n_bytes);
595
596 if (u->use_tsched)
597
598 /* We won't fill up the playback buffer before at least
599 * half the sleep time is over because otherwise we might
600 * ask for more data from the clients then they expect. We
601 * need to guarantee that clients only have to keep around
602 * a single hw buffer length. */
603
604 if (!polled &&
605 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
606 break;
607
608 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
609
610 if (polled)
611 PA_ONCE_BEGIN {
612 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
613 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
614 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
615 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
616 pa_strnull(dn));
617 pa_xfree(dn);
618 } PA_ONCE_END;
619
620 break;
621 }
622
623 if (++j > 10) {
624 #ifdef DEBUG_TIMING
625 pa_log_debug("Not filling up, because already too many iterations.");
626 #endif
627
628 break;
629 }
630
631 n_bytes -= u->hwbuf_unused;
632 polled = FALSE;
633
634 for (;;) {
635 snd_pcm_sframes_t frames;
636 void *p;
637 pa_bool_t after_avail = TRUE;
638
639 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
640
641 if (u->memchunk.length <= 0)
642 pa_sink_render(u->sink, n_bytes, &u->memchunk);
643
644 pa_assert(u->memchunk.length > 0);
645
646 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
647
648 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
649 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
650
651 p = pa_memblock_acquire(u->memchunk.memblock);
652 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
653 pa_memblock_release(u->memchunk.memblock);
654
655 if (PA_UNLIKELY(frames < 0)) {
656
657 if (!after_avail && (int) frames == -EAGAIN)
658 break;
659
660 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
661 continue;
662
663 return r;
664 }
665
666 if (!after_avail && frames == 0)
667 break;
668
669 pa_assert(frames > 0);
670 after_avail = FALSE;
671
672 u->memchunk.index += (size_t) frames * u->frame_size;
673 u->memchunk.length -= (size_t) frames * u->frame_size;
674
675 if (u->memchunk.length <= 0) {
676 pa_memblock_unref(u->memchunk.memblock);
677 pa_memchunk_reset(&u->memchunk);
678 }
679
680 work_done = TRUE;
681
682 u->write_count += frames * u->frame_size;
683 u->since_start += frames * u->frame_size;
684
685 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
686
687 if ((size_t) frames * u->frame_size >= n_bytes)
688 break;
689
690 n_bytes -= (size_t) frames * u->frame_size;
691 }
692 }
693
694 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
695
696 if (*sleep_usec > process_usec)
697 *sleep_usec -= process_usec;
698 else
699 *sleep_usec = 0;
700
701 return work_done ? 1 : 0;
702 }
703
/* Feed the current playback position into the time smoother, which maps
 * system time to stream time for latency interpolation. Updates are
 * rate-limited by u->smoother_interval, which backs off exponentially
 * up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp; failure here is non-fatal,
     * we fall back to the local clock below */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Bytes written so far minus what still sits in the hw buffer gives
     * the byte position currently being played */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
753
754 static pa_usec_t sink_get_latency(struct userdata *u) {
755 pa_usec_t r;
756 int64_t delay;
757 pa_usec_t now1, now2;
758
759 pa_assert(u);
760
761 now1 = pa_rtclock_now();
762 now2 = pa_smoother_get(u->smoother, now1);
763
764 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
765
766 r = delay >= 0 ? (pa_usec_t) delay : 0;
767
768 if (u->memchunk.memblock)
769 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
770
771 return r;
772 }
773
774 static int build_pollfd(struct userdata *u) {
775 pa_assert(u);
776 pa_assert(u->pcm_handle);
777
778 if (u->alsa_rtpoll_item)
779 pa_rtpoll_item_free(u->alsa_rtpoll_item);
780
781 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
782 return -1;
783
784 return 0;
785 }
786
/* Called from IO context */
/* Suspend the device: pause the time smoother, close the PCM handle and
 * free the poll item. unsuspend() re-opens everything. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
808
/* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the resulting software parameters to ALSA. Also
 * updates the sink's max_request. Returns 0 on success or a negative
 * ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so re-derive the dependent limits */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode push avail_min far out so ALSA doesn't wake us
         * up before our own timer does */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
864
/* Called from IO context */
/* Re-open and reconfigure the PCM device after a suspend. The resume
 * only succeeds if the device comes back with exactly the access mode,
 * sample spec and fragment layout we had before; otherwise we fail and
 * close the handle again. Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Disable ALSA's automatic conversions: we want to detect (and fail
     * on) any change in the device's native configuration */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Reset the timing bookkeeping, the old values refer to the closed
     * handle */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
941
/* Called from IO context */
/* Sink message handler: answers latency queries and drives the
 * suspend/resume transitions on state changes, then delegates to the
 * generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report 0 latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
997
998 /* Called from main context */
999 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1000 pa_sink_state_t old_state;
1001 struct userdata *u;
1002
1003 pa_sink_assert_ref(s);
1004 pa_assert_se(u = s->userdata);
1005
1006 old_state = pa_sink_get_state(u->sink);
1007
1008 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1009 reserve_done(u);
1010 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1011 if (reserve_init(u, u->device_name) < 0)
1012 return -1;
1013
1014 return 0;
1015 }
1016
1017 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1018 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1019
1020 pa_assert(u);
1021 pa_assert(u->mixer_handle);
1022
1023 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1024 return 0;
1025
1026 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1027 pa_sink_get_volume(u->sink, TRUE);
1028 pa_sink_get_mute(u->sink, TRUE);
1029 }
1030
1031 return 0;
1032 }
1033
/* Read the hardware volume from the mixer and propagate it to the sink
 * if it changed (e.g. because the user changed it outside of PA). */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed, avoid needless updates */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}
1060
/* Apply the requested sink volume to the hardware mixer. On dB-capable
 * mixers the remainder that the hardware cannot represent is applied in
 * software (unless it is within VOLUME_ACCURACY); otherwise the actual
 * hardware volume is reported back as the real volume. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* pa_alsa_path_set_volume() writes the closest volume the hardware
     * supports back into r */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1112
1113 static void sink_get_mute_cb(pa_sink *s) {
1114 struct userdata *u = s->userdata;
1115 pa_bool_t b;
1116
1117 pa_assert(u);
1118 pa_assert(u->mixer_path);
1119 pa_assert(u->mixer_handle);
1120
1121 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1122 return;
1123
1124 s->muted = b;
1125 }
1126
1127 static void sink_set_mute_cb(pa_sink *s) {
1128 struct userdata *u = s->userdata;
1129
1130 pa_assert(u);
1131 pa_assert(u->mixer_path);
1132 pa_assert(u->mixer_handle);
1133
1134 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1135 }
1136
1137 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1138 struct userdata *u = s->userdata;
1139 pa_alsa_port_data *data;
1140
1141 pa_assert(u);
1142 pa_assert(p);
1143 pa_assert(u->mixer_handle);
1144
1145 data = PA_DEVICE_PORT_DATA(p);
1146
1147 pa_assert_se(u->mixer_path = data->path);
1148 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1149
1150 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1151 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1152 s->n_volume_steps = PA_VOLUME_NORM+1;
1153
1154 if (u->mixer_path->max_dB > 0.0)
1155 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1156 else
1157 pa_log_info("No particular base volume set, fixing to 0 dB");
1158 } else {
1159 s->base_volume = PA_VOLUME_NORM;
1160 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1161 }
1162
1163 if (data->setting)
1164 pa_alsa_setting_select(data->setting, u->mixer_handle);
1165
1166 if (s->set_mute)
1167 s->set_mute(s);
1168 if (s->set_volume)
1169 s->set_volume(s);
1170
1171 return 0;
1172 }
1173
1174 static void sink_update_requested_latency_cb(pa_sink *s) {
1175 struct userdata *u = s->userdata;
1176 size_t before;
1177 pa_assert(u);
1178
1179 if (!u->pcm_handle)
1180 return;
1181
1182 before = u->hwbuf_unused;
1183 update_sw_params(u);
1184
1185 /* Let's check whether we now use only a smaller part of the
1186 buffer then before. If so, we need to make sure that subsequent
1187 rewinds are relative to the new maximum fill level and not to the
1188 current fill level. Thus, let's do a full rewind once, to clear
1189 things up. */
1190
1191 if (u->hwbuf_unused > before) {
1192 pa_log_debug("Requesting rewind due to latency change.");
1193 pa_sink_request_rewind(s, (size_t) -1);
1194 }
1195 }
1196
1197 static int process_rewind(struct userdata *u) {
1198 snd_pcm_sframes_t unused;
1199 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1200 pa_assert(u);
1201
1202 /* Figure out how much we shall rewind and reset the counter */
1203 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1204
1205 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1206
1207 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1208 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1209 return -1;
1210 }
1211
1212 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1213
1214 if (u->hwbuf_size > unused_nbytes)
1215 limit_nbytes = u->hwbuf_size - unused_nbytes;
1216 else
1217 limit_nbytes = 0;
1218
1219 if (rewind_nbytes > limit_nbytes)
1220 rewind_nbytes = limit_nbytes;
1221
1222 if (rewind_nbytes > 0) {
1223 snd_pcm_sframes_t in_frames, out_frames;
1224
1225 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1226
1227 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1228 pa_log_debug("before: %lu", (unsigned long) in_frames);
1229 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1230 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1231 if (try_recover(u, "process_rewind", out_frames) < 0)
1232 return -1;
1233 out_frames = 0;
1234 }
1235
1236 pa_log_debug("after: %lu", (unsigned long) out_frames);
1237
1238 rewind_nbytes = (size_t) out_frames * u->frame_size;
1239
1240 if (rewind_nbytes <= 0)
1241 pa_log_info("Tried rewind, but was apparently not possible.");
1242 else {
1243 u->write_count -= rewind_nbytes;
1244 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1245 pa_sink_process_rewind(u->sink, rewind_nbytes);
1246
1247 u->after_rewind = TRUE;
1248 return 0;
1249 }
1250 } else
1251 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1252
1253 pa_sink_process_rewind(u->sink, 0);
1254 return 0;
1255 }
1256
/* IO thread main loop: performs rewinds, renders audio into the ALSA
 * buffer (mmap or unix write), schedules the next wakeup (timer-based
 * or poll-based) and recovers from device errors, until a shutdown
 * message terminates the loop. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* Playback is started only after the first data has been
                 * written, and the smoother is resumed at that point */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A zero return from pa_rtpoll_run() means a clean quit request */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Any event other than POLLOUT indicates an error/suspend
             * condition: recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1390
1391 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1392 const char *n;
1393 char *t;
1394
1395 pa_assert(data);
1396 pa_assert(ma);
1397 pa_assert(device_name);
1398
1399 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1400 pa_sink_new_data_set_name(data, n);
1401 data->namereg_fail = TRUE;
1402 return;
1403 }
1404
1405 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1406 data->namereg_fail = TRUE;
1407 else {
1408 n = device_id ? device_id : device_name;
1409 data->namereg_fail = FALSE;
1410 }
1411
1412 if (mapping)
1413 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1414 else
1415 t = pa_sprintf_malloc("alsa_output.%s", n);
1416
1417 pa_sink_new_data_set_name(data, t);
1418 pa_xfree(t);
1419 }
1420
1421 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1422
1423 if (!mapping && !element)
1424 return;
1425
1426 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1427 pa_log_info("Failed to find a working mixer device.");
1428 return;
1429 }
1430
1431 if (element) {
1432
1433 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1434 goto fail;
1435
1436 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1437 goto fail;
1438
1439 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1440 pa_alsa_path_dump(u->mixer_path);
1441 } else {
1442
1443 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1444 goto fail;
1445
1446 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1447
1448 pa_log_debug("Probed mixer paths:");
1449 pa_alsa_path_set_dump(u->mixer_path_set);
1450 }
1451
1452 return;
1453
1454 fail:
1455
1456 if (u->mixer_path_set) {
1457 pa_alsa_path_set_free(u->mixer_path_set);
1458 u->mixer_path_set = NULL;
1459 } else if (u->mixer_path) {
1460 pa_alsa_path_free(u->mixer_path);
1461 u->mixer_path = NULL;
1462 }
1463
1464 if (u->mixer_handle) {
1465 snd_mixer_close(u->mixer_handle);
1466 u->mixer_handle = NULL;
1467 }
1468 }
1469
/* Wire the probed mixer path (or path set) into the sink: select the
 * active path, derive base volume and step count, install the hardware
 * volume/mute callbacks and start monitoring the mixer's fds so that
 * external changes are picked up via mixer_callback. Returns 0 on
 * success (including "no mixer at all"), -1 on error. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No ports: fall back to the first path of the set, if any */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Place the base volume so that the path's maximum dB
             * setting corresponds to PA_VOLUME_NORM */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds in the main loop so that changes made by
     * other applications are noticed */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1557
/* Create and initialize a complete ALSA sink: parse module arguments,
 * open the PCM device (by mapping, device id, or device string), probe
 * the mixer, create the pa_sink, configure buffer/latency metrics,
 * start the IO thread and apply the initial volume/mute state. Returns
 * the new sink or NULL on failure (all partial state is freed). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss while opening */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Default buffer metrics, overridable via module arguments below */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    /* NOTE(review): this local hwbuf_size is never read again;
     * u->hwbuf_size is recomputed from fragment_size * nfrags below */
    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs high-resolution timers */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation before touching the hardware */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d come back as what the device actually supports */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Downgrade mmap/tsched if the device turned out not to support them */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Fill in the pa_sink_new() parameters and properties */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Buffer metrics in terms of what the device actually granted */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1864
/* Tear down everything pa_alsa_sink_new() set up. The order is
 * significant: unlink the sink first so no new data is routed to it,
 * then shut down the IO thread, then release the remaining resources. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to exit and wait for it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop the stream before closing the device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its paths; free u->mixer_path separately only
     * when it was synthesized stand-alone (see find_mixer's element
     * branch in this file) */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1916
1917 void pa_alsa_sink_free(pa_sink *s) {
1918 struct userdata *u;
1919
1920 pa_sink_assert_ref(s);
1921 pa_assert_se(u = s->userdata);
1922
1923 userdata_free(u);
1924 }