]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa-sink: reduce the amount of smoother updates
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
71 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC) /* 2ms -- min smoother update interval */
72 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC) /* 200ms -- max smoother update inteval */
73
74 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
75
76 struct userdata {
77 pa_core *core;
78 pa_module *module;
79 pa_sink *sink;
80
81 pa_thread *thread;
82 pa_thread_mq thread_mq;
83 pa_rtpoll *rtpoll;
84
85 snd_pcm_t *pcm_handle;
86
87 pa_alsa_fdlist *mixer_fdl;
88 snd_mixer_t *mixer_handle;
89 pa_alsa_path_set *mixer_path_set;
90 pa_alsa_path *mixer_path;
91
92 pa_cvolume hardware_volume;
93
94 size_t
95 frame_size,
96 fragment_size,
97 hwbuf_size,
98 tsched_watermark,
99 hwbuf_unused,
100 min_sleep,
101 min_wakeup,
102 watermark_step;
103
104 unsigned nfragments;
105 pa_memchunk memchunk;
106
107 char *device_name; /* name of the PCM device */
108 char *control_device; /* name of the control device */
109
110 pa_bool_t use_mmap:1, use_tsched:1;
111
112 pa_bool_t first, after_rewind;
113
114 pa_rtpoll_item *alsa_rtpoll_item;
115
116 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
117
118 pa_smoother *smoother;
119 uint64_t write_count;
120 uint64_t since_start;
121 pa_usec_t smoother_interval;
122 pa_usec_t last_smoother_update;
123
124 pa_reserve_wrapper *reserve;
125 pa_hook_slot *reserve_slot;
126 pa_reserve_monitor_wrapper *monitor;
127 pa_hook_slot *monitor_slot;
128 };
129
130 static void userdata_free(struct userdata *u);
131
132 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
133 pa_assert(r);
134 pa_assert(u);
135
136 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
137 return PA_HOOK_CANCEL;
138
139 return PA_HOOK_OK;
140 }
141
142 static void reserve_done(struct userdata *u) {
143 pa_assert(u);
144
145 if (u->reserve_slot) {
146 pa_hook_slot_free(u->reserve_slot);
147 u->reserve_slot = NULL;
148 }
149
150 if (u->reserve) {
151 pa_reserve_wrapper_unref(u->reserve);
152 u->reserve = NULL;
153 }
154 }
155
156 static void reserve_update(struct userdata *u) {
157 const char *description;
158 pa_assert(u);
159
160 if (!u->sink || !u->reserve)
161 return;
162
163 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
164 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
165 }
166
167 static int reserve_init(struct userdata *u, const char *dname) {
168 char *rname;
169
170 pa_assert(u);
171 pa_assert(dname);
172
173 if (u->reserve)
174 return 0;
175
176 if (pa_in_system_mode())
177 return 0;
178
179 if (!(rname = pa_alsa_get_reserve_name(dname)))
180 return 0;
181
182 /* We are resuming, try to lock the device */
183 u->reserve = pa_reserve_wrapper_get(u->core, rname);
184 pa_xfree(rname);
185
186 if (!(u->reserve))
187 return -1;
188
189 reserve_update(u);
190
191 pa_assert(!u->reserve_slot);
192 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
193
194 return 0;
195 }
196
197 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
198 pa_bool_t b;
199
200 pa_assert(w);
201 pa_assert(u);
202
203 b = PA_PTR_TO_UINT(busy) && !u->reserve;
204
205 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
206 return PA_HOOK_OK;
207 }
208
209 static void monitor_done(struct userdata *u) {
210 pa_assert(u);
211
212 if (u->monitor_slot) {
213 pa_hook_slot_free(u->monitor_slot);
214 u->monitor_slot = NULL;
215 }
216
217 if (u->monitor) {
218 pa_reserve_monitor_wrapper_unref(u->monitor);
219 u->monitor = NULL;
220 }
221 }
222
223 static int reserve_monitor_init(struct userdata *u, const char *dname) {
224 char *rname;
225
226 pa_assert(u);
227 pa_assert(dname);
228
229 if (pa_in_system_mode())
230 return 0;
231
232 if (!(rname = pa_alsa_get_reserve_name(dname)))
233 return 0;
234
235 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
236 pa_xfree(rname);
237
238 if (!(u->monitor))
239 return -1;
240
241 pa_assert(!u->monitor_slot);
242 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
243
244 return 0;
245 }
246
247 static void fix_min_sleep_wakeup(struct userdata *u) {
248 size_t max_use, max_use_2;
249
250 pa_assert(u);
251
252 max_use = u->hwbuf_size - u->hwbuf_unused;
253 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
254
255 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
256 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
257
258 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
259 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
260 }
261
262 static void fix_tsched_watermark(struct userdata *u) {
263 size_t max_use;
264 pa_assert(u);
265
266 max_use = u->hwbuf_size - u->hwbuf_unused;
267
268 if (u->tsched_watermark > max_use - u->min_sleep)
269 u->tsched_watermark = max_use - u->min_sleep;
270
271 if (u->tsched_watermark < u->min_wakeup)
272 u->tsched_watermark = u->min_wakeup;
273 }
274
275 static void adjust_after_underrun(struct userdata *u) {
276 size_t old_watermark;
277 pa_usec_t old_min_latency, new_min_latency;
278
279 pa_assert(u);
280 pa_assert(u->use_tsched);
281
282 /* First, just try to increase the watermark */
283 old_watermark = u->tsched_watermark;
284 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
285 fix_tsched_watermark(u);
286
287 if (old_watermark != u->tsched_watermark) {
288 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
289 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
290 return;
291 }
292
293 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
294 old_min_latency = u->sink->thread_info.min_latency;
295 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
296 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
297
298 if (old_min_latency != new_min_latency) {
299 pa_log_notice("Increasing minimal latency to %0.2f ms",
300 (double) new_min_latency / PA_USEC_PER_MSEC);
301
302 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
303 return;
304 }
305
306 /* When we reach this we're officialy fucked! */
307 }
308
309 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
310 pa_usec_t usec, wm;
311
312 pa_assert(sleep_usec);
313 pa_assert(process_usec);
314
315 pa_assert(u);
316
317 usec = pa_sink_get_requested_latency_within_thread(u->sink);
318
319 if (usec == (pa_usec_t) -1)
320 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
321
322 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
323
324 if (wm > usec)
325 wm = usec/2;
326
327 *sleep_usec = usec - wm;
328 *process_usec = wm;
329
330 #ifdef DEBUG_TIMING
331 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
332 (unsigned long) (usec / PA_USEC_PER_MSEC),
333 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
334 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
335 #endif
336 }
337
338 static int try_recover(struct userdata *u, const char *call, int err) {
339 pa_assert(u);
340 pa_assert(call);
341 pa_assert(err < 0);
342
343 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
344
345 pa_assert(err != -EAGAIN);
346
347 if (err == -EPIPE)
348 pa_log_debug("%s: Buffer underrun!", call);
349
350 if (err == -ESTRPIPE)
351 pa_log_debug("%s: System suspended!", call);
352
353 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
354 pa_log("%s: %s", call, pa_alsa_strerror(err));
355 return -1;
356 }
357
358 u->first = TRUE;
359 u->since_start = 0;
360 return 0;
361 }
362
363 static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
364 size_t left_to_play;
365
366 /* We use <= instead of < for this check here because an underrun
367 * only happens after the last sample was processed, not already when
368 * it is removed from the buffer. This is particularly important
369 * when block transfer is used. */
370
371 if (n_bytes <= u->hwbuf_size) {
372 left_to_play = u->hwbuf_size - n_bytes;
373
374 #ifdef DEBUG_TIMING
375 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
376 #endif
377
378 } else {
379 left_to_play = 0;
380
381 #ifdef DEBUG_TIMING
382 PA_DEBUG_TRAP;
383 #endif
384
385 if (!u->first && !u->after_rewind) {
386
387 if (pa_log_ratelimit())
388 pa_log_info("Underrun!");
389
390 if (u->use_tsched)
391 adjust_after_underrun(u);
392 }
393 }
394
395 return left_to_play;
396 }
397
398 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
399 pa_bool_t work_done = TRUE;
400 pa_usec_t max_sleep_usec = 0, process_usec = 0;
401 size_t left_to_play;
402 unsigned j = 0;
403
404 pa_assert(u);
405 pa_sink_assert_ref(u->sink);
406
407 if (u->use_tsched)
408 hw_sleep_time(u, &max_sleep_usec, &process_usec);
409
410 for (;;) {
411 snd_pcm_sframes_t n;
412 size_t n_bytes;
413 int r;
414 pa_bool_t after_avail = TRUE;
415
416 /* First we determine how many samples are missing to fill the
417 * buffer up to 100% */
418
419 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
420
421 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
422 continue;
423
424 return r;
425 }
426
427 n_bytes = (size_t) n * u->frame_size;
428
429 #ifdef DEBUG_TIMING
430 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
431 #endif
432
433 left_to_play = check_left_to_play(u, n_bytes);
434
435 if (u->use_tsched)
436
437 /* We won't fill up the playback buffer before at least
438 * half the sleep time is over because otherwise we might
439 * ask for more data from the clients then they expect. We
440 * need to guarantee that clients only have to keep around
441 * a single hw buffer length. */
442
443 if (!polled &&
444 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
445 #ifdef DEBUG_TIMING
446 pa_log_debug("Not filling up, because too early.");
447 #endif
448 break;
449 }
450
451 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
452
453 if (polled)
454 PA_ONCE_BEGIN {
455 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
456 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
457 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
458 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
459 pa_strnull(dn));
460 pa_xfree(dn);
461 } PA_ONCE_END;
462
463 #ifdef DEBUG_TIMING
464 pa_log_debug("Not filling up, because not necessary.");
465 #endif
466 break;
467 }
468
469
470 if (++j > 10) {
471 #ifdef DEBUG_TIMING
472 pa_log_debug("Not filling up, because already too many iterations.");
473 #endif
474
475 break;
476 }
477
478 n_bytes -= u->hwbuf_unused;
479 polled = FALSE;
480
481 #ifdef DEBUG_TIMING
482 pa_log_debug("Filling up");
483 #endif
484
485 for (;;) {
486 pa_memchunk chunk;
487 void *p;
488 int err;
489 const snd_pcm_channel_area_t *areas;
490 snd_pcm_uframes_t offset, frames;
491 snd_pcm_sframes_t sframes;
492
493 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
494 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
495
496 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
497
498 if (!after_avail && err == -EAGAIN)
499 break;
500
501 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
502 continue;
503
504 return r;
505 }
506
507 /* Make sure that if these memblocks need to be copied they will fit into one slot */
508 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
509 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
510
511 if (!after_avail && frames == 0)
512 break;
513
514 pa_assert(frames > 0);
515 after_avail = FALSE;
516
517 /* Check these are multiples of 8 bit */
518 pa_assert((areas[0].first & 7) == 0);
519 pa_assert((areas[0].step & 7)== 0);
520
521 /* We assume a single interleaved memory buffer */
522 pa_assert((areas[0].first >> 3) == 0);
523 pa_assert((areas[0].step >> 3) == u->frame_size);
524
525 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
526
527 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
528 chunk.length = pa_memblock_get_length(chunk.memblock);
529 chunk.index = 0;
530
531 pa_sink_render_into_full(u->sink, &chunk);
532 pa_memblock_unref_fixed(chunk.memblock);
533
534 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
535
536 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
537 continue;
538
539 return r;
540 }
541
542 work_done = TRUE;
543
544 u->write_count += frames * u->frame_size;
545 u->since_start += frames * u->frame_size;
546
547 #ifdef DEBUG_TIMING
548 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
549 #endif
550
551 if ((size_t) frames * u->frame_size >= n_bytes)
552 break;
553
554 n_bytes -= (size_t) frames * u->frame_size;
555 }
556 }
557
558 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
559
560 if (*sleep_usec > process_usec)
561 *sleep_usec -= process_usec;
562 else
563 *sleep_usec = 0;
564
565 return work_done ? 1 : 0;
566 }
567
568 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
569 pa_bool_t work_done = FALSE;
570 pa_usec_t max_sleep_usec = 0, process_usec = 0;
571 size_t left_to_play;
572 unsigned j = 0;
573
574 pa_assert(u);
575 pa_sink_assert_ref(u->sink);
576
577 if (u->use_tsched)
578 hw_sleep_time(u, &max_sleep_usec, &process_usec);
579
580 for (;;) {
581 snd_pcm_sframes_t n;
582 size_t n_bytes;
583 int r;
584
585 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
586
587 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
588 continue;
589
590 return r;
591 }
592
593 n_bytes = (size_t) n * u->frame_size;
594 left_to_play = check_left_to_play(u, n_bytes);
595
596 if (u->use_tsched)
597
598 /* We won't fill up the playback buffer before at least
599 * half the sleep time is over because otherwise we might
600 * ask for more data from the clients then they expect. We
601 * need to guarantee that clients only have to keep around
602 * a single hw buffer length. */
603
604 if (!polled &&
605 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
606 break;
607
608 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
609
610 if (polled)
611 PA_ONCE_BEGIN {
612 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
613 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
614 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
615 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
616 pa_strnull(dn));
617 pa_xfree(dn);
618 } PA_ONCE_END;
619
620 break;
621 }
622
623 if (++j > 10) {
624 #ifdef DEBUG_TIMING
625 pa_log_debug("Not filling up, because already too many iterations.");
626 #endif
627
628 break;
629 }
630
631 n_bytes -= u->hwbuf_unused;
632 polled = FALSE;
633
634 for (;;) {
635 snd_pcm_sframes_t frames;
636 void *p;
637 pa_bool_t after_avail = TRUE;
638
639 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
640
641 if (u->memchunk.length <= 0)
642 pa_sink_render(u->sink, n_bytes, &u->memchunk);
643
644 pa_assert(u->memchunk.length > 0);
645
646 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
647
648 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
649 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
650
651 p = pa_memblock_acquire(u->memchunk.memblock);
652 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
653 pa_memblock_release(u->memchunk.memblock);
654
655 if (PA_UNLIKELY(frames < 0)) {
656
657 if (!after_avail && (int) frames == -EAGAIN)
658 break;
659
660 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
661 continue;
662
663 return r;
664 }
665
666 if (!after_avail && frames == 0)
667 break;
668
669 pa_assert(frames > 0);
670 after_avail = FALSE;
671
672 u->memchunk.index += (size_t) frames * u->frame_size;
673 u->memchunk.length -= (size_t) frames * u->frame_size;
674
675 if (u->memchunk.length <= 0) {
676 pa_memblock_unref(u->memchunk.memblock);
677 pa_memchunk_reset(&u->memchunk);
678 }
679
680 work_done = TRUE;
681
682 u->write_count += frames * u->frame_size;
683 u->since_start += frames * u->frame_size;
684
685 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
686
687 if ((size_t) frames * u->frame_size >= n_bytes)
688 break;
689
690 n_bytes -= (size_t) frames * u->frame_size;
691 }
692 }
693
694 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
695
696 if (*sleep_usec > process_usec)
697 *sleep_usec -= process_usec;
698 else
699 *sleep_usec = 0;
700
701 return work_done ? 1 : 0;
702 }
703
704 static void update_smoother(struct userdata *u) {
705 snd_pcm_sframes_t delay = 0;
706 int64_t position;
707 int err;
708 pa_usec_t now1 = 0, now2;
709 snd_pcm_status_t *status;
710
711 snd_pcm_status_alloca(&status);
712
713 pa_assert(u);
714 pa_assert(u->pcm_handle);
715
716 /* Let's update the time smoother */
717
718 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
719 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
720 return;
721 }
722
723 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
724 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
725 else {
726 snd_htimestamp_t htstamp = { 0, 0 };
727 snd_pcm_status_get_htstamp(status, &htstamp);
728 now1 = pa_timespec_load(&htstamp);
729 }
730
731 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
732 if (now1 <= 0)
733 now1 = pa_rtclock_now();
734
735 /* check if the time since the last update is bigger than the interval */
736 if (u->last_smoother_update > 0) {
737 if (u->last_smoother_update + u->smoother_interval > now1)
738 return;
739 }
740
741 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
742
743 if (PA_UNLIKELY(position < 0))
744 position = 0;
745
746 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
747
748 u->last_smoother_update = now1;
749 /* exponentially increase the update interval up to the MAX limit */
750 u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
751
752 pa_smoother_put(u->smoother, now1, now2);
753 }
754
755 static pa_usec_t sink_get_latency(struct userdata *u) {
756 pa_usec_t r;
757 int64_t delay;
758 pa_usec_t now1, now2;
759
760 pa_assert(u);
761
762 now1 = pa_rtclock_now();
763 now2 = pa_smoother_get(u->smoother, now1);
764
765 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
766
767 r = delay >= 0 ? (pa_usec_t) delay : 0;
768
769 if (u->memchunk.memblock)
770 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
771
772 return r;
773 }
774
775 static int build_pollfd(struct userdata *u) {
776 pa_assert(u);
777 pa_assert(u->pcm_handle);
778
779 if (u->alsa_rtpoll_item)
780 pa_rtpoll_item_free(u->alsa_rtpoll_item);
781
782 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
783 return -1;
784
785 return 0;
786 }
787
788 /* Called from IO context */
789 static int suspend(struct userdata *u) {
790 pa_assert(u);
791 pa_assert(u->pcm_handle);
792
793 pa_smoother_pause(u->smoother, pa_rtclock_now());
794
795 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
796 * take awfully long with our long buffer sizes today. */
797 snd_pcm_close(u->pcm_handle);
798 u->pcm_handle = NULL;
799
800 if (u->alsa_rtpoll_item) {
801 pa_rtpoll_item_free(u->alsa_rtpoll_item);
802 u->alsa_rtpoll_item = NULL;
803 }
804
805 pa_log_info("Device suspended...");
806
807 return 0;
808 }
809
810 /* Called from IO context */
811 static int update_sw_params(struct userdata *u) {
812 snd_pcm_uframes_t avail_min;
813 int err;
814
815 pa_assert(u);
816
817 /* Use the full buffer if noone asked us for anything specific */
818 u->hwbuf_unused = 0;
819
820 if (u->use_tsched) {
821 pa_usec_t latency;
822
823 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
824 size_t b;
825
826 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
827
828 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
829
830 /* We need at least one sample in our buffer */
831
832 if (PA_UNLIKELY(b < u->frame_size))
833 b = u->frame_size;
834
835 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
836 }
837
838 fix_min_sleep_wakeup(u);
839 fix_tsched_watermark(u);
840 }
841
842 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
843
844 /* We need at last one frame in the used part of the buffer */
845 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
846
847 if (u->use_tsched) {
848 pa_usec_t sleep_usec, process_usec;
849
850 hw_sleep_time(u, &sleep_usec, &process_usec);
851 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
852 }
853
854 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
855
856 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
857 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
858 return err;
859 }
860
861 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
862
863 return 0;
864 }
865
866 /* Called from IO context */
867 static int unsuspend(struct userdata *u) {
868 pa_sample_spec ss;
869 int err;
870 pa_bool_t b, d;
871 unsigned nfrags;
872 snd_pcm_uframes_t period_size;
873
874 pa_assert(u);
875 pa_assert(!u->pcm_handle);
876
877 pa_log_info("Trying resume...");
878
879 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
880 /*SND_PCM_NONBLOCK|*/
881 SND_PCM_NO_AUTO_RESAMPLE|
882 SND_PCM_NO_AUTO_CHANNELS|
883 SND_PCM_NO_AUTO_FORMAT)) < 0) {
884 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
885 goto fail;
886 }
887
888 ss = u->sink->sample_spec;
889 nfrags = u->nfragments;
890 period_size = u->fragment_size / u->frame_size;
891 b = u->use_mmap;
892 d = u->use_tsched;
893
894 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
895 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
896 goto fail;
897 }
898
899 if (b != u->use_mmap || d != u->use_tsched) {
900 pa_log_warn("Resume failed, couldn't get original access mode.");
901 goto fail;
902 }
903
904 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
905 pa_log_warn("Resume failed, couldn't restore original sample settings.");
906 goto fail;
907 }
908
909 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
910 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
911 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
912 (unsigned long) nfrags, period_size * u->frame_size);
913 goto fail;
914 }
915
916 if (update_sw_params(u) < 0)
917 goto fail;
918
919 if (build_pollfd(u) < 0)
920 goto fail;
921
922 u->write_count = 0;
923 pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
924 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
925 u->last_smoother_update = 0;
926
927 u->first = TRUE;
928 u->since_start = 0;
929
930
931 pa_log_info("Resumed successfully...");
932
933 return 0;
934
935 fail:
936 if (u->pcm_handle) {
937 snd_pcm_close(u->pcm_handle);
938 u->pcm_handle = NULL;
939 }
940
941 return -1;
942 }
943
944 /* Called from IO context */
945 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
946 struct userdata *u = PA_SINK(o)->userdata;
947
948 switch (code) {
949
950 case PA_SINK_MESSAGE_GET_LATENCY: {
951 pa_usec_t r = 0;
952
953 if (u->pcm_handle)
954 r = sink_get_latency(u);
955
956 *((pa_usec_t*) data) = r;
957
958 return 0;
959 }
960
961 case PA_SINK_MESSAGE_SET_STATE:
962
963 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
964
965 case PA_SINK_SUSPENDED:
966 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
967
968 if (suspend(u) < 0)
969 return -1;
970
971 break;
972
973 case PA_SINK_IDLE:
974 case PA_SINK_RUNNING:
975
976 if (u->sink->thread_info.state == PA_SINK_INIT) {
977 if (build_pollfd(u) < 0)
978 return -1;
979 }
980
981 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
982 if (unsuspend(u) < 0)
983 return -1;
984 }
985
986 break;
987
988 case PA_SINK_UNLINKED:
989 case PA_SINK_INIT:
990 case PA_SINK_INVALID_STATE:
991 ;
992 }
993
994 break;
995 }
996
997 return pa_sink_process_msg(o, code, data, offset, chunk);
998 }
999
1000 /* Called from main context */
1001 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
1002 pa_sink_state_t old_state;
1003 struct userdata *u;
1004
1005 pa_sink_assert_ref(s);
1006 pa_assert_se(u = s->userdata);
1007
1008 old_state = pa_sink_get_state(u->sink);
1009
1010 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1011 reserve_done(u);
1012 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1013 if (reserve_init(u, u->device_name) < 0)
1014 return -1;
1015
1016 return 0;
1017 }
1018
1019 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1020 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1021
1022 pa_assert(u);
1023 pa_assert(u->mixer_handle);
1024
1025 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1026 return 0;
1027
1028 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1029 pa_sink_get_volume(u->sink, TRUE);
1030 pa_sink_get_mute(u->sink, TRUE);
1031 }
1032
1033 return 0;
1034 }
1035
1036 static void sink_get_volume_cb(pa_sink *s) {
1037 struct userdata *u = s->userdata;
1038 pa_cvolume r;
1039 char t[PA_CVOLUME_SNPRINT_MAX];
1040
1041 pa_assert(u);
1042 pa_assert(u->mixer_path);
1043 pa_assert(u->mixer_handle);
1044
1045 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1046 return;
1047
1048 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1049 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1050
1051 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1052
1053 if (pa_cvolume_equal(&u->hardware_volume, &r))
1054 return;
1055
1056 s->real_volume = u->hardware_volume = r;
1057
1058 /* Hmm, so the hardware volume changed, let's reset our software volume */
1059 if (u->mixer_path->has_dB)
1060 pa_sink_set_soft_volume(s, NULL);
1061 }
1062
1063 static void sink_set_volume_cb(pa_sink *s) {
1064 struct userdata *u = s->userdata;
1065 pa_cvolume r;
1066 char t[PA_CVOLUME_SNPRINT_MAX];
1067
1068 pa_assert(u);
1069 pa_assert(u->mixer_path);
1070 pa_assert(u->mixer_handle);
1071
1072 /* Shift up by the base volume */
1073 pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1074
1075 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1076 return;
1077
1078 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1079 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1080
1081 u->hardware_volume = r;
1082
1083 if (u->mixer_path->has_dB) {
1084 pa_cvolume new_soft_volume;
1085 pa_bool_t accurate_enough;
1086
1087 /* Match exactly what the user requested by software */
1088 pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1089
1090 /* If the adjustment to do in software is only minimal we
1091 * can skip it. That saves us CPU at the expense of a bit of
1092 * accuracy */
1093 accurate_enough =
1094 (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1095 (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1096
1097 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
1098 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1099 pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
1100 pa_yes_no(accurate_enough));
1101
1102 if (!accurate_enough)
1103 s->soft_volume = new_soft_volume;
1104
1105 } else {
1106 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1107
1108 /* We can't match exactly what the user requested, hence let's
1109 * at least tell the user about it */
1110
1111 s->real_volume = r;
1112 }
1113 }
1114
1115 static void sink_get_mute_cb(pa_sink *s) {
1116 struct userdata *u = s->userdata;
1117 pa_bool_t b;
1118
1119 pa_assert(u);
1120 pa_assert(u->mixer_path);
1121 pa_assert(u->mixer_handle);
1122
1123 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1124 return;
1125
1126 s->muted = b;
1127 }
1128
1129 static void sink_set_mute_cb(pa_sink *s) {
1130 struct userdata *u = s->userdata;
1131
1132 pa_assert(u);
1133 pa_assert(u->mixer_path);
1134 pa_assert(u->mixer_handle);
1135
1136 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1137 }
1138
1139 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1140 struct userdata *u = s->userdata;
1141 pa_alsa_port_data *data;
1142
1143 pa_assert(u);
1144 pa_assert(p);
1145 pa_assert(u->mixer_handle);
1146
1147 data = PA_DEVICE_PORT_DATA(p);
1148
1149 pa_assert_se(u->mixer_path = data->path);
1150 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1151
1152 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1153 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1154 s->n_volume_steps = PA_VOLUME_NORM+1;
1155
1156 if (u->mixer_path->max_dB > 0.0)
1157 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1158 else
1159 pa_log_info("No particular base volume set, fixing to 0 dB");
1160 } else {
1161 s->base_volume = PA_VOLUME_NORM;
1162 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1163 }
1164
1165 if (data->setting)
1166 pa_alsa_setting_select(data->setting, u->mixer_handle);
1167
1168 if (s->set_mute)
1169 s->set_mute(s);
1170 if (s->set_volume)
1171 s->set_volume(s);
1172
1173 return 0;
1174 }
1175
1176 static void sink_update_requested_latency_cb(pa_sink *s) {
1177 struct userdata *u = s->userdata;
1178 size_t before;
1179 pa_assert(u);
1180
1181 if (!u->pcm_handle)
1182 return;
1183
1184 before = u->hwbuf_unused;
1185 update_sw_params(u);
1186
1187 /* Let's check whether we now use only a smaller part of the
1188 buffer then before. If so, we need to make sure that subsequent
1189 rewinds are relative to the new maximum fill level and not to the
1190 current fill level. Thus, let's do a full rewind once, to clear
1191 things up. */
1192
1193 if (u->hwbuf_unused > before) {
1194 pa_log_debug("Requesting rewind due to latency change.");
1195 pa_sink_request_rewind(s, (size_t) -1);
1196 }
1197 }
1198
1199 static int process_rewind(struct userdata *u) {
1200 snd_pcm_sframes_t unused;
1201 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1202 pa_assert(u);
1203
1204 /* Figure out how much we shall rewind and reset the counter */
1205 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1206
1207 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1208
1209 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1210 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1211 return -1;
1212 }
1213
1214 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1215
1216 if (u->hwbuf_size > unused_nbytes)
1217 limit_nbytes = u->hwbuf_size - unused_nbytes;
1218 else
1219 limit_nbytes = 0;
1220
1221 if (rewind_nbytes > limit_nbytes)
1222 rewind_nbytes = limit_nbytes;
1223
1224 if (rewind_nbytes > 0) {
1225 snd_pcm_sframes_t in_frames, out_frames;
1226
1227 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1228
1229 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1230 pa_log_debug("before: %lu", (unsigned long) in_frames);
1231 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1232 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1233 if (try_recover(u, "process_rewind", out_frames) < 0)
1234 return -1;
1235 out_frames = 0;
1236 }
1237
1238 pa_log_debug("after: %lu", (unsigned long) out_frames);
1239
1240 rewind_nbytes = (size_t) out_frames * u->frame_size;
1241
1242 if (rewind_nbytes <= 0)
1243 pa_log_info("Tried rewind, but was apparently not possible.");
1244 else {
1245 u->write_count -= rewind_nbytes;
1246 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1247 pa_sink_process_rewind(u->sink, rewind_nbytes);
1248
1249 u->after_rewind = TRUE;
1250 return 0;
1251 }
1252 } else
1253 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1254
1255 pa_sink_process_rewind(u->sink, 0);
1256 return 0;
1257 }
1258
/* The IO thread for this sink: renders audio into the ALSA buffer,
 * services rewind requests, feeds the latency smoother, and sleeps
 * until the next estimated wakeup (timer-based scheduling) or until
 * ALSA signals POLLOUT (IRQ-based scheduling). */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Handle a pending rewind request before writing new data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* On the very first write, kick off playback and
                 * restart the latency smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A return of 0 means a shutdown request was processed */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates trouble; try to
             * recover the PCM and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1392
1393 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1394 const char *n;
1395 char *t;
1396
1397 pa_assert(data);
1398 pa_assert(ma);
1399 pa_assert(device_name);
1400
1401 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1402 pa_sink_new_data_set_name(data, n);
1403 data->namereg_fail = TRUE;
1404 return;
1405 }
1406
1407 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1408 data->namereg_fail = TRUE;
1409 else {
1410 n = device_id ? device_id : device_name;
1411 data->namereg_fail = FALSE;
1412 }
1413
1414 if (mapping)
1415 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1416 else
1417 t = pa_sprintf_malloc("alsa_output.%s", n);
1418
1419 pa_sink_new_data_set_name(data, t);
1420 pa_xfree(t);
1421 }
1422
/* Locate and probe the ALSA mixer for u->pcm_handle. On success,
 * u->mixer_handle is set together with either u->mixer_path (when an
 * explicit element was named) or u->mixer_path_set (probed from the
 * mapping). On any failure all partially set up mixer state is torn
 * down again, leaving the sink to fall back to software volume. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Without a mapping or an explicit element there is nothing to probe */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* The user named a mixer element explicitly: synthesize a
         * single path around just that element */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe the whole set of paths suggested by the mapping */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* Undo any partial setup so the caller sees a consistent
     * "no mixer available" state */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1471
/* Wire the probed mixer into the sink: select the active path,
 * install hardware volume/mute callbacks where the path supports
 * them, and start monitoring the mixer for external changes.
 * Returns 0 on success (including "no mixer, software volume only"),
 * -1 if mixer fd monitoring could not be initialized. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* find_mixer() found nothing usable: plain software volume */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Choose the base volume so that the path's maximum dB
             * maps onto PA_VOLUME_NORM */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds from the main loop so that external changes
     * (e.g. via alsamixer) reach us through mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1559
/* Create a new ALSA sink: parse module arguments, open and configure
 * the PCM device (trying mmap and timer-based scheduling where
 * possible), probe the mixer, create the pa_sink object, and start
 * the IO thread. Returns the new sink, or NULL on failure (all
 * partially constructed state is freed via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the core defaults; module arguments may override both */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss below */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs high-resolution timers to be usable */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    /* Smoother mapping the sound card clock onto the system clock */
    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d come back FALSE if ALSA could not honour mmap/tsched */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        /* Card profile instantiation: the mapping dictates the device */
        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* device_id= without a mapping: auto-pick a suitable mapping */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        /* Fall back to opening a plain ALSA device string */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Build the pa_sink creation data: name, sample spec, proplist */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Re-express the watermark in the (possibly tweaked) final sample spec */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1866
/* Tear down everything in u, roughly in reverse construction order.
 * Safe on a partially initialized userdata: every member is
 * NULL-checked before release. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core before stopping the IO thread */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously shut down the IO thread, then join it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Drop queued samples before closing the PCM */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its member paths, so free only one of the two */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1918
1919 void pa_alsa_sink_free(pa_sink *s) {
1920 struct userdata *u;
1921
1922 pa_sink_assert_ref(s);
1923 pa_assert_se(u = s->userdata);
1924
1925 userdata_free(u);
1926 }