/* (extraction artifact, preserved as a comment: gitweb page header from
 * code.delx.au — [pulseaudio] / src / modules / alsa / alsa-sink.c,
 * "Merge commit 'origin/master-tx'") */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
71 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100) /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
72
/* Per-sink instance state for the ALSA sink, allocated at module load
 * and released by userdata_free(). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread, its message queue, and the realtime poll loop. */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;             /* NULL while suspended (see suspend()) */

    /* Mixer plumbing for hardware volume/mute. */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;        /* last volume read from / written to hw */

    /* Byte quantities relating to the hardware buffer. */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,                  /* part of the hw buffer deliberately left unused */
        min_sleep,
        min_wakeup,
        watermark_step;

    unsigned nfragments;
    pa_memchunk memchunk;              /* partially-consumed chunk (unix_write path) */

    char *device_name;                 /* name of the PCM device */
    char *control_device;              /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    /* Suppress underrun handling right after stream start / a rewind. */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state. */
    pa_smoother *smoother;
    uint64_t write_count;              /* total bytes handed to ALSA since last resume */
    uint64_t since_start;              /* bytes written since last start/recovery */

    /* Session-level device reservation. */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
124
125 static void userdata_free(struct userdata *u);
126
127 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
128 pa_assert(r);
129 pa_assert(u);
130
131 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
132 return PA_HOOK_CANCEL;
133
134 return PA_HOOK_OK;
135 }
136
137 static void reserve_done(struct userdata *u) {
138 pa_assert(u);
139
140 if (u->reserve_slot) {
141 pa_hook_slot_free(u->reserve_slot);
142 u->reserve_slot = NULL;
143 }
144
145 if (u->reserve) {
146 pa_reserve_wrapper_unref(u->reserve);
147 u->reserve = NULL;
148 }
149 }
150
151 static void reserve_update(struct userdata *u) {
152 const char *description;
153 pa_assert(u);
154
155 if (!u->sink || !u->reserve)
156 return;
157
158 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
159 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
160 }
161
162 static int reserve_init(struct userdata *u, const char *dname) {
163 char *rname;
164
165 pa_assert(u);
166 pa_assert(dname);
167
168 if (u->reserve)
169 return 0;
170
171 if (pa_in_system_mode())
172 return 0;
173
174 if (!(rname = pa_alsa_get_reserve_name(dname)))
175 return 0;
176
177 /* We are resuming, try to lock the device */
178 u->reserve = pa_reserve_wrapper_get(u->core, rname);
179 pa_xfree(rname);
180
181 if (!(u->reserve))
182 return -1;
183
184 reserve_update(u);
185
186 pa_assert(!u->reserve_slot);
187 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
188
189 return 0;
190 }
191
192 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
193 pa_bool_t b;
194
195 pa_assert(w);
196 pa_assert(u);
197
198 b = PA_PTR_TO_UINT(busy) && !u->reserve;
199
200 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
201 return PA_HOOK_OK;
202 }
203
204 static void monitor_done(struct userdata *u) {
205 pa_assert(u);
206
207 if (u->monitor_slot) {
208 pa_hook_slot_free(u->monitor_slot);
209 u->monitor_slot = NULL;
210 }
211
212 if (u->monitor) {
213 pa_reserve_monitor_wrapper_unref(u->monitor);
214 u->monitor = NULL;
215 }
216 }
217
218 static int reserve_monitor_init(struct userdata *u, const char *dname) {
219 char *rname;
220
221 pa_assert(u);
222 pa_assert(dname);
223
224 if (pa_in_system_mode())
225 return 0;
226
227 if (!(rname = pa_alsa_get_reserve_name(dname)))
228 return 0;
229
230 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
231 pa_xfree(rname);
232
233 if (!(u->monitor))
234 return -1;
235
236 pa_assert(!u->monitor_slot);
237 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
238
239 return 0;
240 }
241
242 static void fix_min_sleep_wakeup(struct userdata *u) {
243 size_t max_use, max_use_2;
244
245 pa_assert(u);
246
247 max_use = u->hwbuf_size - u->hwbuf_unused;
248 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
249
250 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
251 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
252
253 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
254 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
255 }
256
257 static void fix_tsched_watermark(struct userdata *u) {
258 size_t max_use;
259 pa_assert(u);
260
261 max_use = u->hwbuf_size - u->hwbuf_unused;
262
263 if (u->tsched_watermark > max_use - u->min_sleep)
264 u->tsched_watermark = max_use - u->min_sleep;
265
266 if (u->tsched_watermark < u->min_wakeup)
267 u->tsched_watermark = u->min_wakeup;
268 }
269
270 static void adjust_after_underrun(struct userdata *u) {
271 size_t old_watermark;
272 pa_usec_t old_min_latency, new_min_latency;
273
274 pa_assert(u);
275 pa_assert(u->use_tsched);
276
277 /* First, just try to increase the watermark */
278 old_watermark = u->tsched_watermark;
279 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
280 fix_tsched_watermark(u);
281
282 if (old_watermark != u->tsched_watermark) {
283 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
284 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
285 return;
286 }
287
288 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
289 old_min_latency = u->sink->thread_info.min_latency;
290 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
291 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
292
293 if (old_min_latency != new_min_latency) {
294 pa_log_notice("Increasing minimal latency to %0.2f ms",
295 (double) new_min_latency / PA_USEC_PER_MSEC);
296
297 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
298 return;
299 }
300
301 /* When we reach this we're officialy fucked! */
302 }
303
304 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
305 pa_usec_t usec, wm;
306
307 pa_assert(sleep_usec);
308 pa_assert(process_usec);
309
310 pa_assert(u);
311
312 usec = pa_sink_get_requested_latency_within_thread(u->sink);
313
314 if (usec == (pa_usec_t) -1)
315 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
316
317 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
318
319 if (wm > usec)
320 wm = usec/2;
321
322 *sleep_usec = usec - wm;
323 *process_usec = wm;
324
325 #ifdef DEBUG_TIMING
326 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
327 (unsigned long) (usec / PA_USEC_PER_MSEC),
328 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
329 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
330 #endif
331 }
332
333 static int try_recover(struct userdata *u, const char *call, int err) {
334 pa_assert(u);
335 pa_assert(call);
336 pa_assert(err < 0);
337
338 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
339
340 pa_assert(err != -EAGAIN);
341
342 if (err == -EPIPE)
343 pa_log_debug("%s: Buffer underrun!", call);
344
345 if (err == -ESTRPIPE)
346 pa_log_debug("%s: System suspended!", call);
347
348 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
349 pa_log("%s: %s", call, pa_alsa_strerror(err));
350 return -1;
351 }
352
353 u->first = TRUE;
354 u->since_start = 0;
355 return 0;
356 }
357
/* Given that n_bytes are currently free in the hardware buffer, return
 * how many bytes are still queued for playback. If the free space
 * exceeds the buffer size we underran; unless we just started or just
 * rewound, log it and (in tsched mode) adjust the watermark/latency. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* More free space reported than the buffer holds: underrun. */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Underruns right after start or a rewind are expected; don't
         * log or react to those. */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
392
393 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
394 pa_bool_t work_done = TRUE;
395 pa_usec_t max_sleep_usec = 0, process_usec = 0;
396 size_t left_to_play;
397 unsigned j = 0;
398
399 pa_assert(u);
400 pa_sink_assert_ref(u->sink);
401
402 if (u->use_tsched)
403 hw_sleep_time(u, &max_sleep_usec, &process_usec);
404
405 for (;;) {
406 snd_pcm_sframes_t n;
407 size_t n_bytes;
408 int r;
409 pa_bool_t after_avail = TRUE;
410
411 /* First we determine how many samples are missing to fill the
412 * buffer up to 100% */
413
414 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
415
416 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
417 continue;
418
419 return r;
420 }
421
422 n_bytes = (size_t) n * u->frame_size;
423
424 #ifdef DEBUG_TIMING
425 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
426 #endif
427
428 left_to_play = check_left_to_play(u, n_bytes);
429
430 if (u->use_tsched)
431
432 /* We won't fill up the playback buffer before at least
433 * half the sleep time is over because otherwise we might
434 * ask for more data from the clients then they expect. We
435 * need to guarantee that clients only have to keep around
436 * a single hw buffer length. */
437
438 if (!polled &&
439 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
440 #ifdef DEBUG_TIMING
441 pa_log_debug("Not filling up, because too early.");
442 #endif
443 break;
444 }
445
446 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
447
448 if (polled)
449 PA_ONCE_BEGIN {
450 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
451 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
452 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
453 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
454 pa_strnull(dn));
455 pa_xfree(dn);
456 } PA_ONCE_END;
457
458 #ifdef DEBUG_TIMING
459 pa_log_debug("Not filling up, because not necessary.");
460 #endif
461 break;
462 }
463
464
465 if (++j > 10) {
466 #ifdef DEBUG_TIMING
467 pa_log_debug("Not filling up, because already too many iterations.");
468 #endif
469
470 break;
471 }
472
473 n_bytes -= u->hwbuf_unused;
474 polled = FALSE;
475
476 #ifdef DEBUG_TIMING
477 pa_log_debug("Filling up");
478 #endif
479
480 for (;;) {
481 pa_memchunk chunk;
482 void *p;
483 int err;
484 const snd_pcm_channel_area_t *areas;
485 snd_pcm_uframes_t offset, frames;
486 snd_pcm_sframes_t sframes;
487
488 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
489 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
490
491 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
492
493 if (!after_avail && err == -EAGAIN)
494 break;
495
496 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
497 continue;
498
499 return r;
500 }
501
502 /* Make sure that if these memblocks need to be copied they will fit into one slot */
503 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
504 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
505
506 if (!after_avail && frames == 0)
507 break;
508
509 pa_assert(frames > 0);
510 after_avail = FALSE;
511
512 /* Check these are multiples of 8 bit */
513 pa_assert((areas[0].first & 7) == 0);
514 pa_assert((areas[0].step & 7)== 0);
515
516 /* We assume a single interleaved memory buffer */
517 pa_assert((areas[0].first >> 3) == 0);
518 pa_assert((areas[0].step >> 3) == u->frame_size);
519
520 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
521
522 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
523 chunk.length = pa_memblock_get_length(chunk.memblock);
524 chunk.index = 0;
525
526 pa_sink_render_into_full(u->sink, &chunk);
527 pa_memblock_unref_fixed(chunk.memblock);
528
529 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
530
531 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
532 continue;
533
534 return r;
535 }
536
537 work_done = TRUE;
538
539 u->write_count += frames * u->frame_size;
540 u->since_start += frames * u->frame_size;
541
542 #ifdef DEBUG_TIMING
543 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
544 #endif
545
546 if ((size_t) frames * u->frame_size >= n_bytes)
547 break;
548
549 n_bytes -= (size_t) frames * u->frame_size;
550 }
551 }
552
553 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
554
555 if (*sleep_usec > process_usec)
556 *sleep_usec -= process_usec;
557 else
558 *sleep_usec = 0;
559
560 return work_done ? 1 : 0;
561 }
562
/* Refill the hardware buffer via snd_pcm_writei() (the non-mmap path).
 *
 * Returns 1 if any audio was written, 0 if nothing needed to be done,
 * or a negative value on unrecoverable error. *sleep_usec is set to
 * how long the caller may sleep before we must run again. 'polled' is
 * TRUE when we were woken up by poll() rather than by a timer. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hw buffer right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious POLLOUT wakeup: warn once about the broken driver. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap the number of outer iterations so a misbehaving device
         * cannot starve the IO thread. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            pa_bool_t after_avail = TRUE;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only once the previous one is fully
             * consumed; partial writes keep their remainder in
             * u->memchunk across calls. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                /* -EAGAIN after we already wrote something means the
                 * device is full for now -- not an error. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance past what the device accepted. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
698
/* Feed the time smoother with a fresh (system time, playback time)
 * sample, derived from the current ALSA delay and -- when available --
 * the driver's own high-resolution timestamp. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Playback position = total bytes handed to ALSA minus what is
     * still queued in the device. */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
739
740 static pa_usec_t sink_get_latency(struct userdata *u) {
741 pa_usec_t r;
742 int64_t delay;
743 pa_usec_t now1, now2;
744
745 pa_assert(u);
746
747 now1 = pa_rtclock_now();
748 now2 = pa_smoother_get(u->smoother, now1);
749
750 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
751
752 r = delay >= 0 ? (pa_usec_t) delay : 0;
753
754 if (u->memchunk.memblock)
755 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
756
757 return r;
758 }
759
760 static int build_pollfd(struct userdata *u) {
761 pa_assert(u);
762 pa_assert(u->pcm_handle);
763
764 if (u->alsa_rtpoll_item)
765 pa_rtpoll_item_free(u->alsa_rtpoll_item);
766
767 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
768 return -1;
769
770 return 0;
771 }
772
/* Called from IO context */
/* Close the PCM device and stop the latency smoother; unsuspend()
 * reverses this. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    /* The poll descriptors belonged to the now-closed handle. */
    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
794
/* Called from IO context */
/* Recompute hwbuf_unused and avail_min from the currently requested
 * latency and push the new software parameters to ALSA. Returns 0 on
 * success or a negative ALSA error. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything above the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* hwbuf_unused changed, so the derived limits must follow. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, don't let ALSA wake us before our own timer
         * would -- push avail_min out by the sleep period. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
850
/* Called from IO context */
/* Reopen the PCM device after suspend(), restoring the exact previous
 * configuration. Fails (-1) if the device can no longer provide the
 * original access mode, sample spec or fragment settings. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request exactly what we had before the suspend; the set_hw_params
     * call may modify these, which we then verify below. */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart the latency bookkeeping from scratch. */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);

    u->first = TRUE;
    u->since_start = 0;


    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
926
/* Called from IO context */
/* Message handler for the sink object: answers latency queries and
 * performs suspend/resume on state changes; everything else is passed
 * on to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no PCM handle) report zero latency. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: just hook the PCM
                     * fds into the poll loop. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
982
983 /* Called from main context */
984 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
985 pa_sink_state_t old_state;
986 struct userdata *u;
987
988 pa_sink_assert_ref(s);
989 pa_assert_se(u = s->userdata);
990
991 old_state = pa_sink_get_state(u->sink);
992
993 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
994 reserve_done(u);
995 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
996 if (reserve_init(u, u->device_name) < 0)
997 return -1;
998
999 return 0;
1000 }
1001
/* ALSA mixer element callback: an external change to the element's
 * value makes us re-read hardware volume and mute into the sink. */
static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    /* Element is being removed -- nothing to refresh. */
    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        /* force_refresh=TRUE: bypass the cached values. */
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}
1018
/* Read the hardware volume from the mixer into s->real_volume; if it
 * changed behind our back and the path supports dB, reset the software
 * volume so total volume follows the hardware. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Unchanged -- nothing to propagate. */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}
1045
/* Write s->real_volume to the hardware mixer as closely as possible
 * and, when the path supports dB, make up the remaining difference in
 * software (unless it is within VOLUME_ACCURACY). */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* On return 'r' holds the volume the hardware actually applied. */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)", pa_cvolume_snprint(t, sizeof(t), &new_soft_volume),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1097
1098 static void sink_get_mute_cb(pa_sink *s) {
1099 struct userdata *u = s->userdata;
1100 pa_bool_t b;
1101
1102 pa_assert(u);
1103 pa_assert(u->mixer_path);
1104 pa_assert(u->mixer_handle);
1105
1106 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1107 return;
1108
1109 s->muted = b;
1110 }
1111
/* Push s->muted to the hardware mixer. */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1121
/* Switch the sink to port 'p': select the corresponding mixer path,
 * recompute base volume / step count, and re-apply the current mute
 * and volume through the new path. Always returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* dB-capable path: map 0dB to PA_VOLUME_NORM via base_volume. */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        /* No dB information: expose the raw mixer step range. */
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply current state through the newly selected path. */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1158
1159 static void sink_update_requested_latency_cb(pa_sink *s) {
1160 struct userdata *u = s->userdata;
1161 size_t before;
1162 pa_assert(u);
1163
1164 if (!u->pcm_handle)
1165 return;
1166
1167 before = u->hwbuf_unused;
1168 update_sw_params(u);
1169
1170 /* Let's check whether we now use only a smaller part of the
1171 buffer then before. If so, we need to make sure that subsequent
1172 rewinds are relative to the new maximum fill level and not to the
1173 current fill level. Thus, let's do a full rewind once, to clear
1174 things up. */
1175
1176 if (u->hwbuf_unused > before) {
1177 pa_log_debug("Requesting rewind due to latency change.");
1178 pa_sink_request_rewind(s, (size_t) -1);
1179 }
1180 }
1181
1182 static int process_rewind(struct userdata *u) {
1183 snd_pcm_sframes_t unused;
1184 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1185 pa_assert(u);
1186
1187 /* Figure out how much we shall rewind and reset the counter */
1188 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1189
1190 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1191
1192 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1193 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1194 return -1;
1195 }
1196
1197 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1198
1199 if (u->hwbuf_size > unused_nbytes)
1200 limit_nbytes = u->hwbuf_size - unused_nbytes;
1201 else
1202 limit_nbytes = 0;
1203
1204 if (rewind_nbytes > limit_nbytes)
1205 rewind_nbytes = limit_nbytes;
1206
1207 if (rewind_nbytes > 0) {
1208 snd_pcm_sframes_t in_frames, out_frames;
1209
1210 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1211
1212 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1213 pa_log_debug("before: %lu", (unsigned long) in_frames);
1214 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1215 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1216 if (try_recover(u, "process_rewind", out_frames) < 0)
1217 return -1;
1218 out_frames = 0;
1219 }
1220
1221 pa_log_debug("after: %lu", (unsigned long) out_frames);
1222
1223 rewind_nbytes = (size_t) out_frames * u->frame_size;
1224
1225 if (rewind_nbytes <= 0)
1226 pa_log_info("Tried rewind, but was apparently not possible.");
1227 else {
1228 u->write_count -= rewind_nbytes;
1229 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1230 pa_sink_process_rewind(u->sink, rewind_nbytes);
1231
1232 u->after_rewind = TRUE;
1233 return 0;
1234 }
1235 } else
1236 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1237
1238 pa_sink_process_rewind(u->sink, 0);
1239 return 0;
1240 }
1241
/* The sink's real-time I/O thread. Loops forever: renders audio into the
 * ALSA buffer (mmap or read/write path), programs the wakeup timer when
 * timer-based scheduling is in use, sleeps in pa_rtpoll_run(), and then
 * processes the poll results, recovering from xruns reported via the
 * poll revents. Exits via `finish` on a regular shutdown message, or via
 * `fail` (which asks the core to unload the module) on any error. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Honor any pending rewind request before writing new data. */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First data written: actually start the PCM stream and
                 * resume the timing smoother from "paused". */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    if (pa_log_ratelimit())
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            /* Translate raw poll revents into PCM-level events. */
            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT (POLLERR and friends) indicates a
             * stream error, typically an underrun; try to recover and
             * restart playback from scratch. */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1375
1376 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1377 const char *n;
1378 char *t;
1379
1380 pa_assert(data);
1381 pa_assert(ma);
1382 pa_assert(device_name);
1383
1384 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1385 pa_sink_new_data_set_name(data, n);
1386 data->namereg_fail = TRUE;
1387 return;
1388 }
1389
1390 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1391 data->namereg_fail = TRUE;
1392 else {
1393 n = device_id ? device_id : device_name;
1394 data->namereg_fail = FALSE;
1395 }
1396
1397 if (mapping)
1398 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1399 else
1400 t = pa_sprintf_malloc("alsa_output.%s", n);
1401
1402 pa_sink_new_data_set_name(data, t);
1403 pa_xfree(t);
1404 }
1405
/* Locate and probe the ALSA mixer for this PCM. On success this fills in
 * u->mixer_handle plus either u->mixer_path (when an explicit "control="
 * element was given, a single synthesized path) or u->mixer_path_set
 * (probed from the mapping). Failure is non-fatal: everything is cleaned
 * up and the sink simply runs without hardware volume/mute control. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Without a mapping or an explicit element there is nothing to probe. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* User named a specific mixer element: build a one-element path. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe the whole set of candidate paths for this mapping. */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);

        pa_log_debug("Probed mixer paths:");
        pa_alsa_path_set_dump(u->mixer_path_set);
    }

    return;

fail:

    /* Only one of path-set/path can be owned here; free whichever was
     * allocated, then drop the mixer handle. */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1454
/* Activate the mixer state found by find_mixer(): select the path that
 * matches the active port (or the only/first available path), derive the
 * sink's base volume and step count, install the hardware volume/mute
 * callbacks where supported, and start monitoring the mixer fds for
 * external changes. Returns 0 on success or when there is simply no
 * mixer to set up; -1 only if fd monitoring cannot be initialized. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer found earlier: software volume/mute only. */
    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No ports: fall back to the first path of the probed set. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* PA_VOLUME_NORM maps to the path's max_dB; base_volume is
             * the corresponding software attenuation from 0 dB. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB scale: expose the raw hardware steps. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds so external volume/mute changes are picked up. */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1542
/* Create and fully initialize an ALSA sink: parse module arguments, open
 * the PCM device (by mapping, device id, or device string), probe the
 * mixer, create the pa_sink object, configure buffer metrics and
 * latency, start the I/O thread, apply or read the initial volume/mute,
 * and publish the sink. Returns the new sink, or NULL on failure (all
 * partially-created state is torn down via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the server defaults; module arguments may override. */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss when opening. */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling only makes sense with hrtimers. */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Grab the device reservation before touching the hardware. */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d come back FALSE if mmap/tsched could not be honored. */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* No explicit mapping: probe profiles and pick one automatically. */
        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        /* Plain "device=" string (or the default device). */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    /* Plugin/software devices have unreliable timing info; don't tsched. */
    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    /* Fill the property list describing the device and how we access it. */
    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    /* Wire up the vtable and message handling. */
    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Watermark was computed against the requested spec; convert it to
     * the spec ALSA actually gave us. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        /* With timer scheduling latency is dynamic, up to the whole buffer. */
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        /* The user asked for a specific volume: push it to hardware. */
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        /* Otherwise adopt whatever the hardware currently has. */
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    /* userdata_free() tears down everything allocated so far. */
    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1848
/* Tear down everything owned by the userdata, in dependency order:
 * unlink the sink first (so no new work arrives), then shut down the
 * I/O thread, then release the sink, buffers, poll infrastructure, the
 * PCM and mixer handles, and finally the reservation and strings. Safe
 * to call on a partially-initialized userdata (every step is guarded). */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to exit and join it before freeing anything it
     * might still be touching. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Drop any queued audio before closing the device. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A standalone mixer_path is only owned by us when there is no
     * path set (the set owns its member paths). */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1900
1901 void pa_alsa_sink_free(pa_sink *s) {
1902 struct userdata *u;
1903
1904 pa_sink_assert_ref(s);
1905 pa_assert_se(u = s->userdata);
1906
1907 userdata_free(u);
1908 }