]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
alsa: monitor device reservation status and resume automatically when device becomes...
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
69
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread plumbing */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;          /* NULL while suspended */

    /* ALSA mixer state for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* dB*100 range, valid iff hw_dB_supported */
    pa_bool_t hw_dB_supported:1;
    pa_bool_t mixer_seperate_channels:1; /* (sic) per-channel volume control available */
    pa_cvolume hardware_volume;          /* last volume we read from/wrote to hardware */

    /* All byte quantities below are in bytes of the sink sample spec */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,        /* total hardware buffer size */
        tsched_watermark,  /* refill when this much is left (timer scheduling) */
        hwbuf_unused,      /* part of the hw buffer we deliberately keep empty */
        min_sleep,
        min_wakeup,
        watermark_step;    /* increment applied to the watermark on underruns */

    unsigned nfragments;
    pa_memchunk memchunk;  /* pending partial chunk for the non-mmap write path */

    char *device_name;

    pa_bool_t use_mmap:1, use_tsched:1;

    /* first: right after start/resume; after_rewind: right after a rewind —
     * both suppress underrun accounting */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency smoothing: write_count counts bytes handed to ALSA since open */
    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;

    /* Device reservation (session-level device locking via D-Bus) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
122
123 static void userdata_free(struct userdata *u);
124
/* Called when another application forcibly takes over our device reservation.
 * We suspend the sink; returning PA_HOOK_CANCEL refuses to hand the device over. */
static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}
134
135 static void reserve_done(struct userdata *u) {
136 pa_assert(u);
137
138 if (u->reserve_slot) {
139 pa_hook_slot_free(u->reserve_slot);
140 u->reserve_slot = NULL;
141 }
142
143 if (u->reserve) {
144 pa_reserve_wrapper_unref(u->reserve);
145 u->reserve = NULL;
146 }
147 }
148
149 static void reserve_update(struct userdata *u) {
150 const char *description;
151 pa_assert(u);
152
153 if (!u->sink || !u->reserve)
154 return;
155
156 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
157 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
158 }
159
/* Acquire the device reservation for device 'dname' and hook up reserve_cb.
 * Returns 0 on success or if reservation is unnecessary (already held, system
 * mode, or no reservation name for this device), -1 if the device is busy. */
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    /* In system mode there is no session bus, so no reservation protocol */
    if (pa_in_system_mode())
        return 0;

    /* We are resuming, try to lock the device */
    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
189
/* Called when the device's reservation status changes. Suspend while some
 * other application holds the device; resume when it becomes free again.
 * If we hold the reservation ourselves (u->reserve set) the busy flag refers
 * to us, so we never suspend in that case. */
static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_bool_t b;

    pa_assert(w);
    pa_assert(u);

    b = PA_PTR_TO_UINT(busy) && !u->reserve;

    pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
    return PA_HOOK_OK;
}
201
202 static void monitor_done(struct userdata *u) {
203 pa_assert(u);
204
205 if (u->monitor_slot) {
206 pa_hook_slot_free(u->monitor_slot);
207 u->monitor_slot = NULL;
208 }
209
210 if (u->monitor) {
211 pa_reserve_monitor_wrapper_unref(u->monitor);
212 u->monitor = NULL;
213 }
214 }
215
/* Start watching the reservation status of device 'dname' so we can suspend
 * while another application holds it and resume when it is released.
 * Returns 0 on success or when monitoring is unnecessary, -1 on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    /* In system mode there is no session bus, so no reservation protocol */
    if (pa_in_system_mode())
        return 0;

    /* No reservation name means this device type isn't covered by the protocol */
    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
240
/* Recompute min_sleep and min_wakeup (in bytes) from their usec constants,
 * clamping each to [one frame, half the usable buffer] so that both can fit
 * in the usable part of the hw buffer simultaneously. */
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
255
/* Clamp the timer-scheduling watermark into [min_wakeup, max_use - min_sleep].
 * Relies on fix_min_sleep_wakeup() having bounded min_sleep to at most half
 * the usable buffer, so the subtraction cannot underflow. */
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;
}
268
/* React to an underrun when timer-based scheduling is in use: first try to
 * raise the wakeup watermark; if it is already maxed out, raise the sink's
 * minimum latency instead. Both grow geometrically but are capped by a fixed
 * per-step increment. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* If we reach this point, both knobs are at their limits and there is
     * nothing left we can do about further underruns. */
}
302
/* Split the target buffer time into how long to sleep and how long to leave
 * for processing: sleep = latency - watermark, process = watermark. If no
 * specific latency was requested, the full hw buffer time is used; the
 * watermark is capped to half the latency so sleep never goes negative. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
331
/* Try to recover the PCM from an error returned by 'call' (e.g. -EPIPE on
 * underrun, -ESTRPIPE on suspend) via snd_pcm_recover(). On success the
 * stream restarts from scratch (first/since_start reset) and 0 is returned;
 * -1 means the error was unrecoverable. */
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    /* EAGAIN should have been handled by the caller, never get here with it */
    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s", call, pa_alsa_strerror(err));
        return -1;
    }

    u->first = TRUE;
    u->since_start = 0;
    return 0;
}
353
/* Given how many bytes ALSA reports as writable (n_bytes), compute how many
 * bytes are still queued for playback. If the device claims more writable
 * space than the whole buffer we underran: log it and, under timer
 * scheduling, adapt the watermark/latency. Underruns are expected (and thus
 * ignored) right after start/resume and right after a rewind. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
388
389 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
390 pa_bool_t work_done = TRUE;
391 pa_usec_t max_sleep_usec = 0, process_usec = 0;
392 size_t left_to_play;
393 unsigned j = 0;
394
395 pa_assert(u);
396 pa_sink_assert_ref(u->sink);
397
398 if (u->use_tsched)
399 hw_sleep_time(u, &max_sleep_usec, &process_usec);
400
401 for (;;) {
402 snd_pcm_sframes_t n;
403 size_t n_bytes;
404 int r;
405
406 /* First we determine how many samples are missing to fill the
407 * buffer up to 100% */
408
409 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
410
411 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
412 continue;
413
414 return r;
415 }
416
417 n_bytes = (size_t) n * u->frame_size;
418
419 #ifdef DEBUG_TIMING
420 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
421 #endif
422
423 left_to_play = check_left_to_play(u, n_bytes);
424
425 if (u->use_tsched)
426
427 /* We won't fill up the playback buffer before at least
428 * half the sleep time is over because otherwise we might
429 * ask for more data from the clients then they expect. We
430 * need to guarantee that clients only have to keep around
431 * a single hw buffer length. */
432
433 if (!polled &&
434 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
435 #ifdef DEBUG_TIMING
436 pa_log_debug("Not filling up, because too early.");
437 #endif
438 break;
439 }
440
441 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
442
443 if (polled)
444 PA_ONCE_BEGIN {
445 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
446 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
447 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
448 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
449 pa_strnull(dn));
450 pa_xfree(dn);
451 } PA_ONCE_END;
452
453 #ifdef DEBUG_TIMING
454 pa_log_debug("Not filling up, because not necessary.");
455 #endif
456 break;
457 }
458
459
460 if (++j > 10) {
461 #ifdef DEBUG_TIMING
462 pa_log_debug("Not filling up, because already too many iterations.");
463 #endif
464
465 break;
466 }
467
468 n_bytes -= u->hwbuf_unused;
469 polled = FALSE;
470
471 #ifdef DEBUG_TIMING
472 pa_log_debug("Filling up");
473 #endif
474
475 for (;;) {
476 pa_memchunk chunk;
477 void *p;
478 int err;
479 const snd_pcm_channel_area_t *areas;
480 snd_pcm_uframes_t offset, frames;
481 snd_pcm_sframes_t sframes;
482
483 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
484 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
485
486 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
487
488 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
489 continue;
490
491 return r;
492 }
493
494 /* Make sure that if these memblocks need to be copied they will fit into one slot */
495 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
496 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
497
498 /* Check these are multiples of 8 bit */
499 pa_assert((areas[0].first & 7) == 0);
500 pa_assert((areas[0].step & 7)== 0);
501
502 /* We assume a single interleaved memory buffer */
503 pa_assert((areas[0].first >> 3) == 0);
504 pa_assert((areas[0].step >> 3) == u->frame_size);
505
506 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
507
508 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
509 chunk.length = pa_memblock_get_length(chunk.memblock);
510 chunk.index = 0;
511
512 pa_sink_render_into_full(u->sink, &chunk);
513 pa_memblock_unref_fixed(chunk.memblock);
514
515 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
516
517 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
518 continue;
519
520 return r;
521 }
522
523 work_done = TRUE;
524
525 u->write_count += frames * u->frame_size;
526 u->since_start += frames * u->frame_size;
527
528 #ifdef DEBUG_TIMING
529 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
530 #endif
531
532 if ((size_t) frames * u->frame_size >= n_bytes)
533 break;
534
535 n_bytes -= (size_t) frames * u->frame_size;
536 }
537 }
538
539 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
540
541 if (*sleep_usec > process_usec)
542 *sleep_usec -= process_usec;
543 else
544 *sleep_usec = 0;
545
546 return work_done ? 1 : 0;
547 }
548
/* Fill the hardware buffer via snd_pcm_writei() (non-mmap fallback path).
 * Same overall structure and return convention as mmap_write(): renders sink
 * data into u->memchunk and pushes it to ALSA until the buffer is full
 * enough. Returns 1 if data was written, 0 if not, negative on unrecoverable
 * error; *sleep_usec tells the caller how long it may sleep. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us but there is nothing to write: a driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve: never spin here forever */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the pending one is exhausted */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance the pending chunk past what ALSA accepted */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
676
/* Feed the time smoother a fresh (system time, playback time) sample pair.
 * Playback position = bytes written minus what ALSA still has queued
 * (delay); system time comes from the PCM's hardware timestamp when
 * available, otherwise from our own clock. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    /* Right after a resume the reported delay can exceed what we wrote */
    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
717
/* Estimate current playback latency: the smoothed gap between bytes written
 * and bytes actually played, plus whatever is still pending in our own
 * partial memchunk (non-mmap path). Negative estimates are clamped to 0. */
static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_usec();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    r = delay >= 0 ? (pa_usec_t) delay : 0;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
737
738 static int build_pollfd(struct userdata *u) {
739 pa_assert(u);
740 pa_assert(u->pcm_handle);
741
742 if (u->alsa_rtpoll_item)
743 pa_rtpoll_item_free(u->alsa_rtpoll_item);
744
745 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
746 return -1;
747
748 return 0;
749 }
750
/* Called from IO context */
/* Suspend the sink: pause the smoother, close the PCM handle entirely
 * (freeing the device for others) and drop its poll item. The handle is
 * reopened in unsuspend(). Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
772
/* Called from IO context */
/* Recompute and apply ALSA software parameters from the currently requested
 * latency: how much of the hw buffer to leave unused, the derived watermark
 * limits, and avail_min (which under tsched also encodes the sleep time).
 * Also updates the sink's max request size. Returns 0 or a negative ALSA
 * error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* Under timer scheduling, push avail_min out so ALSA only wakes us
         * as a fallback, after our planned sleep would have elapsed */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
828
/* Called from IO context */
/* Resume from suspend: reopen the PCM device and re-apply the exact hw/sw
 * parameters we had before. If the device cannot reproduce the original
 * access mode, sample spec or fragment layout, resume fails (-1) and the
 * handle is closed again; otherwise the stream restarts from scratch. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Flush cached ALSA config so we pick up any changes made while suspended */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Restart stream accounting from scratch */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
901
/* Called from IO context */
/* IO-thread message handler: answers latency queries from our smoother and
 * drives suspend/resume on state changes, then delegates everything else to
 * the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: just wire up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
957
/* Called from main context */
/* State-change hook: drop the device reservation when suspending so other
 * applications may grab the device, and re-acquire it when resuming. Fails
 * (-1) only if the reservation cannot be re-acquired on resume. */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    old_state = pa_sink_get_state(u->sink);

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -1;

    return 0;
}
976
977 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
978 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
979
980 pa_assert(u);
981 pa_assert(u->mixer_handle);
982
983 if (mask == SND_CTL_EVENT_MASK_REMOVE)
984 return 0;
985
986 if (mask & SND_CTL_EVENT_MASK_VALUE) {
987 pa_sink_get_volume(u->sink, TRUE, FALSE);
988 pa_sink_get_mute(u->sink, TRUE);
989 }
990
991 return 0;
992 }
993
994 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
995
996 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
997 (double) (u->hw_volume_max - u->hw_volume_min));
998 }
999
1000 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
1001 long alsa_vol;
1002
1003 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
1004 / PA_VOLUME_NORM) + u->hw_volume_min;
1005
1006 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
1007 }
1008
/* Read the current hardware volume from the mixer into the sink, per channel
 * when the mixer supports it, otherwise as a single mono value fanned out to
 * all channels. Uses the dB API when available (values are dB*100, measured
 * relative to hw_dB_max). If the hardware value changed, the sink's virtual
 * volume is updated and — in dB mode — the software volume is reset, since
 * hardware now accounts for the whole volume. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", pa_alsa_strerror(err));
}
1088
/* pa_sink::set_volume callback: push s->virtual_volume into the ALSA mixer
 * element, then read back what the hardware actually accepted so that any
 * remainder can be applied in software. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        /* Per-channel control: write each channel separately via the
         * channel map computed in setup_mixer(). */
        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert PA volume to ALSA's 1/100-dB scale, offset so that
                 * PA_VOLUME_NORM lands on the mixer's maximum dB value, and
                 * clamp into the hardware's supported range. */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                /* dir=1 rounds up so the hardware is never quieter than
                 * requested; the excess is compensated in soft_volume below */
                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back the value the hardware actually applied */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                /* No dB information: map linearly into the raw mixer range */
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Single shared control: drive it with the loudest channel's volume */
        pa_volume_t vol;
        long alsa_vol;

        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", pa_alsa_strerror(err));
}
1199
1200 static void sink_get_mute_cb(pa_sink *s) {
1201 struct userdata *u = s->userdata;
1202 int err, sw;
1203
1204 pa_assert(u);
1205 pa_assert(u->mixer_elem);
1206
1207 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1208 pa_log_error("Unable to get switch: %s", pa_alsa_strerror(err));
1209 return;
1210 }
1211
1212 s->muted = !sw;
1213 }
1214
1215 static void sink_set_mute_cb(pa_sink *s) {
1216 struct userdata *u = s->userdata;
1217 int err;
1218
1219 pa_assert(u);
1220 pa_assert(u->mixer_elem);
1221
1222 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1223 pa_log_error("Unable to set switch: %s", pa_alsa_strerror(err));
1224 return;
1225 }
1226 }
1227
1228 static void sink_update_requested_latency_cb(pa_sink *s) {
1229 struct userdata *u = s->userdata;
1230 size_t before;
1231 pa_assert(u);
1232
1233 if (!u->pcm_handle)
1234 return;
1235
1236 before = u->hwbuf_unused;
1237 update_sw_params(u);
1238
1239 /* Let's check whether we now use only a smaller part of the
1240 buffer then before. If so, we need to make sure that subsequent
1241 rewinds are relative to the new maximum fill level and not to the
1242 current fill level. Thus, let's do a full rewind once, to clear
1243 things up. */
1244
1245 if (u->hwbuf_unused > before) {
1246 pa_log_debug("Requesting rewind due to latency change.");
1247 pa_sink_request_rewind(s, (size_t) -1);
1248 }
1249 }
1250
1251 static int process_rewind(struct userdata *u) {
1252 snd_pcm_sframes_t unused;
1253 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1254 pa_assert(u);
1255
1256 /* Figure out how much we shall rewind and reset the counter */
1257 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1258
1259 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1260
1261 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1262 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1263 return -1;
1264 }
1265
1266 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1267
1268 if (u->hwbuf_size > unused_nbytes)
1269 limit_nbytes = u->hwbuf_size - unused_nbytes;
1270 else
1271 limit_nbytes = 0;
1272
1273 if (rewind_nbytes > limit_nbytes)
1274 rewind_nbytes = limit_nbytes;
1275
1276 if (rewind_nbytes > 0) {
1277 snd_pcm_sframes_t in_frames, out_frames;
1278
1279 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1280
1281 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1282 pa_log_debug("before: %lu", (unsigned long) in_frames);
1283 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1284 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1285 return -1;
1286 }
1287 pa_log_debug("after: %lu", (unsigned long) out_frames);
1288
1289 rewind_nbytes = (size_t) out_frames * u->frame_size;
1290
1291 if (rewind_nbytes <= 0)
1292 pa_log_info("Tried rewind, but was apparently not possible.");
1293 else {
1294 u->write_count -= out_frames * u->frame_size;
1295 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1296 pa_sink_process_rewind(u->sink, rewind_nbytes);
1297
1298 u->after_rewind = TRUE;
1299 return 0;
1300 }
1301 } else
1302 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1303
1304 pa_sink_process_rewind(u->sink, 0);
1305 return 0;
1306 }
1307
/* IO thread main loop: renders audio into the ALSA buffer, services rewind
 * requests, keeps the timing smoother updated, and sleeps either on a timer
 * (tsched mode) or on the PCM poll descriptors until more work is due. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Service any pending rewind before writing new data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: kick off the PCM and the smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means we received PA_MESSAGE_SHUTDOWN: regular exit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT indicates an error/suspend condition:
             * try to recover and restart playback from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1441
1442 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1443 const char *n;
1444 char *t;
1445
1446 pa_assert(data);
1447 pa_assert(ma);
1448 pa_assert(device_name);
1449
1450 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1451 pa_sink_new_data_set_name(data, n);
1452 data->namereg_fail = TRUE;
1453 return;
1454 }
1455
1456 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1457 data->namereg_fail = TRUE;
1458 else {
1459 n = device_id ? device_id : device_name;
1460 data->namereg_fail = FALSE;
1461 }
1462
1463 t = pa_sprintf_malloc("alsa_output.%s", n);
1464 pa_sink_new_data_set_name(data, t);
1465 pa_xfree(t);
1466 }
1467
/* Probe the mixer element (if one was found) and wire up hardware
 * volume/mute control on the sink, falling back to software control
 * wherever the hardware is unsuitable. Returns 0 on success (including
 * the "no mixer at all" case), -1 on fatal error. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer for this device: pure software volume/mute */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            /* Try to get a dB scale too; without it only linear raw-value
             * mapping is possible */
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    /* If the mixer can amplify (max > 0 dB), publish a base
                     * volume so clients know where unamplified "100%" lies */
                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* A non-dB mixer with fewer than 4 discrete steps is too coarse
             * to be useful; prefer software volume in that case */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            /* (note: "seperate" is a historical typo kept for ABI/source compat) */
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds from the main loop so volume changes made by
     * other applications are picked up via mixer_callback */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1552
/* Create and publish a new ALSA playback sink.
 *
 * Parses module arguments, reserves the device, opens the PCM (by profile,
 * device id, or device string), sets up buffering/timer-scheduling, the
 * mixer, and the IO thread, and finally puts the sink into the core.
 *
 * Returns the new pa_sink (owned by the core) or NULL on failure; on
 * failure all partially acquired resources are released via userdata_free(). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    char *control_device = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the core defaults, then let module arguments override */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was asked for; ALSA may tweak ss during open */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers to be accurate enough */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_usec(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Acquire the device reservation and watch it so we can suspend/resume
     * when another process takes or releases the device */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d track whether mmap/tsched survived the device open */
    b = use_mmap;
    d = use_tsched;

    if (profile) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        /* Open with a fixed, caller-selected profile */
        if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        /* Open by device id, letting ALSA pick a suitable profile */
        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, &profile)))

            goto fail;

    } else {

        /* Fallback: open by plain ALSA device string */
        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (profile)
        pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    /* Timer scheduling relies on hw pointer accuracy only real hardware has */
    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_alsa_find_mixer_and_elem(u->pcm_handle, &control_device, &u->mixer_handle, &u->mixer_elem, pa_modargs_get_value(ma, "control", NULL), profile);

    /* Build the sink's creation data: identity, proplist, formats */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle, u->mixer_elem);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (profile) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
    }

    pa_alsa_init_description(data.proplist);

    if (control_device) {
        pa_alsa_init_proplist_ctl(data.proplist, control_device);
        pa_xfree(control_device);
    }

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Translate the watermark from the requested to the actual sample spec */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        /* With timer scheduling the latency is adjustable at runtime */
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    return u->sink;

fail:

    /* userdata_free() copes with a partially initialized struct */
    if (u)
        userdata_free(u);

    return NULL;
}
1848
/* Release everything held by the userdata struct. Safe to call on a
 * partially initialized struct (the fail: path of pa_alsa_sink_new()),
 * hence all the NULL checks. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core first so no new data arrives */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to shut down and wait for it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop pending samples before closing the PCM */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation and its monitor */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1894
1895 void pa_alsa_sink_free(pa_sink *s) {
1896 struct userdata *u;
1897
1898 pa_sink_assert_ref(s);
1899 pa_assert_se(u = s->userdata);
1900
1901 userdata_free(u);
1902 }