src/modules/alsa/alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wake up at least this long before the buffer runs empty */
70
71 struct userdata {
72 pa_core *core;
73 pa_module *module;
74 pa_sink *sink;
75
76 pa_thread *thread;
77 pa_thread_mq thread_mq;
78 pa_rtpoll *rtpoll;
79
80 snd_pcm_t *pcm_handle;
81
82 pa_alsa_fdlist *mixer_fdl;
83 snd_mixer_t *mixer_handle;
84 pa_alsa_path_set *mixer_path_set;
85 pa_alsa_path *mixer_path;
86
87 pa_cvolume hardware_volume;
88
89 size_t
90 frame_size,
91 fragment_size,
92 hwbuf_size,
93 tsched_watermark,
94 hwbuf_unused,
95 min_sleep,
96 min_wakeup,
97 watermark_step;
98
99 unsigned nfragments;
100 pa_memchunk memchunk;
101
102 char *device_name; /* name of the PCM device */
103 char *control_device; /* name of the control device */
104
105 pa_bool_t use_mmap:1, use_tsched:1;
106
107 pa_bool_t first, after_rewind;
108
109 pa_rtpoll_item *alsa_rtpoll_item;
110
111 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
112
113 pa_smoother *smoother;
114 uint64_t write_count;
115 uint64_t since_start;
116
117 pa_reserve_wrapper *reserve;
118 pa_hook_slot *reserve_slot;
119 pa_reserve_monitor_wrapper *monitor;
120 pa_hook_slot *monitor_slot;
121 };
122
123 static void userdata_free(struct userdata *u);
124
125 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
126 pa_assert(r);
127 pa_assert(u);
128
129 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
130 return PA_HOOK_CANCEL;
131
132 return PA_HOOK_OK;
133 }
134
135 static void reserve_done(struct userdata *u) {
136 pa_assert(u);
137
138 if (u->reserve_slot) {
139 pa_hook_slot_free(u->reserve_slot);
140 u->reserve_slot = NULL;
141 }
142
143 if (u->reserve) {
144 pa_reserve_wrapper_unref(u->reserve);
145 u->reserve = NULL;
146 }
147 }
148
149 static void reserve_update(struct userdata *u) {
150 const char *description;
151 pa_assert(u);
152
153 if (!u->sink || !u->reserve)
154 return;
155
156 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
157 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
158 }
159
160 static int reserve_init(struct userdata *u, const char *dname) {
161 char *rname;
162
163 pa_assert(u);
164 pa_assert(dname);
165
166 if (u->reserve)
167 return 0;
168
169 if (pa_in_system_mode())
170 return 0;
171
172 if (!(rname = pa_alsa_get_reserve_name(dname)))
173 return 0;
174
175 /* We are resuming, try to lock the device */
176 u->reserve = pa_reserve_wrapper_get(u->core, rname);
177 pa_xfree(rname);
178
179 if (!(u->reserve))
180 return -1;
181
182 reserve_update(u);
183
184 pa_assert(!u->reserve_slot);
185 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
186
187 return 0;
188 }
189
190 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
191 pa_bool_t b;
192
193 pa_assert(w);
194 pa_assert(u);
195
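/* Suspend the sink when another process has marked the device busy and we
 * do not hold the device reservation ourselves; resume it again once the
 * device becomes free. */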
196 b = PA_PTR_TO_UINT(busy) && !u->reserve;
197
198 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
199 return PA_HOOK_OK;
200 }
201
202 static void monitor_done(struct userdata *u) {
203 pa_assert(u);
204
205 if (u->monitor_slot) {
206 pa_hook_slot_free(u->monitor_slot);
207 u->monitor_slot = NULL;
208 }
209
210 if (u->monitor) {
211 pa_reserve_monitor_wrapper_unref(u->monitor);
212 u->monitor = NULL;
213 }
214 }
215
216 static int reserve_monitor_init(struct userdata *u, const char *dname) {
217 char *rname;
218
219 pa_assert(u);
220 pa_assert(dname);
221
222 if (pa_in_system_mode())
223 return 0;
224
225 if (!(rname = pa_alsa_get_reserve_name(dname)))
226 return 0;
227
228 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
229 pa_xfree(rname);
230
231 if (!(u->monitor))
232 return -1;
233
234 pa_assert(!u->monitor_slot);
235 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
236
237 return 0;
238 }
239
240 static void fix_min_sleep_wakeup(struct userdata *u) {
241 size_t max_use, max_use_2;
242
243 pa_assert(u);
244
245 max_use = u->hwbuf_size - u->hwbuf_unused;
246 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
247
248 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
249 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
250
251 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
252 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
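/* For illustration (assumed values, not from the original source): with
 * 44.1 kHz S16 stereo (4 bytes per frame, ~176 kB/s) TSCHED_MIN_SLEEP_USEC
 * (10ms) is roughly 1.7 kB and TSCHED_MIN_WAKEUP_USEC (4ms) roughly 0.7 kB;
 * both are then clamped between a single frame and half of the usable buffer. */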
253 }
254
255 static void fix_tsched_watermark(struct userdata *u) {
256 size_t max_use;
257 pa_assert(u);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260
261 if (u->tsched_watermark > max_use - u->min_sleep)
262 u->tsched_watermark = max_use - u->min_sleep;
263
264 if (u->tsched_watermark < u->min_wakeup)
265 u->tsched_watermark = u->min_wakeup;
266 }
267
268 static void adjust_after_underrun(struct userdata *u) {
269 size_t old_watermark;
270 pa_usec_t old_min_latency, new_min_latency;
271
272 pa_assert(u);
273 pa_assert(u->use_tsched);
274
275 /* First, just try to increase the watermark */
276 old_watermark = u->tsched_watermark;
277 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
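/* Illustration (assumed values): with a 20ms watermark and a 10ms
 * watermark_step this yields PA_MIN(40ms, 30ms) = 30ms, i.e. the watermark
 * doubles while it is still smaller than the step and grows linearly
 * afterwards. */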
278 fix_tsched_watermark(u);
279
280 if (old_watermark != u->tsched_watermark) {
281 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
282 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
283 return;
284 }
285
286 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
287 old_min_latency = u->sink->thread_info.min_latency;
288 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
289 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
290
291 if (old_min_latency != new_min_latency) {
292 pa_log_notice("Increasing minimal latency to %0.2f ms",
293 (double) new_min_latency / PA_USEC_PER_MSEC);
294
295 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
296 return;
297 }
298
299 /* When we reach this we're officially fucked! */
300 }
301
302 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
303 pa_usec_t usec, wm;
304
305 pa_assert(sleep_usec);
306 pa_assert(process_usec);
307
308 pa_assert(u);
309
310 usec = pa_sink_get_requested_latency_within_thread(u->sink);
311
312 if (usec == (pa_usec_t) -1)
313 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
314
315 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
316
317 if (wm > usec)
318 wm = usec/2;
319
320 *sleep_usec = usec - wm;
321 *process_usec = wm;
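/* Example (assumed numbers): with a requested latency of 100ms and a 20ms
 * watermark we sleep for 80ms and reserve 20ms as processing headroom; had
 * the watermark exceeded the requested latency, it would have been cut to
 * half the latency instead. */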
322
323 #ifdef DEBUG_TIMING
324 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
325 (unsigned long) (usec / PA_USEC_PER_MSEC),
326 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
327 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
328 #endif
329 }
330
331 static int try_recover(struct userdata *u, const char *call, int err) {
332 pa_assert(u);
333 pa_assert(call);
334 pa_assert(err < 0);
335
336 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
337
338 pa_assert(err != -EAGAIN);
339
340 if (err == -EPIPE)
341 pa_log_debug("%s: Buffer underrun!", call);
342
343 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
344 pa_log("%s: %s", call, pa_alsa_strerror(err));
345 return -1;
346 }
347
348 u->first = TRUE;
349 u->since_start = 0;
350 return 0;
351 }
352
353 static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
354 size_t left_to_play;
355
356 /* We use <= instead of < here because an underrun only
357 * happens after the last sample has been processed, not
358 * merely when it is removed from the buffer. This is
359 * particularly important when block transfer is used. */
360
361 if (n_bytes <= u->hwbuf_size) {
362 left_to_play = u->hwbuf_size - n_bytes;
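/* Example (assumed spec): with a 2s buffer at 44.1 kHz S16 stereo
 * (352800 bytes) and 340000 bytes reported writable, roughly 12800 bytes,
 * i.e. about 73ms, are still queued for playback. */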
363
364 #ifdef DEBUG_TIMING
365 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
366 #endif
367
368 } else {
369 left_to_play = 0;
370
371 #ifdef DEBUG_TIMING
372 PA_DEBUG_TRAP;
373 #endif
374
375 if (!u->first && !u->after_rewind) {
376
377 if (pa_log_ratelimit())
378 pa_log_info("Underrun!");
379
380 if (u->use_tsched)
381 adjust_after_underrun(u);
382 }
383 }
384
385 return left_to_play;
386 }
387
388 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
389 pa_bool_t work_done = TRUE;
390 pa_usec_t max_sleep_usec = 0, process_usec = 0;
391 size_t left_to_play;
392 unsigned j = 0;
393
394 pa_assert(u);
395 pa_sink_assert_ref(u->sink);
396
397 if (u->use_tsched)
398 hw_sleep_time(u, &max_sleep_usec, &process_usec);
399
400 for (;;) {
401 snd_pcm_sframes_t n;
402 size_t n_bytes;
403 int r;
404 pa_bool_t after_avail = TRUE;
405
406 /* First we determine how many samples are needed to fill the
407 * buffer up to 100% */
408
409 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
410
411 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
412 continue;
413
414 return r;
415 }
416
417 n_bytes = (size_t) n * u->frame_size;
418
419 #ifdef DEBUG_TIMING
420 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
421 #endif
422
423 left_to_play = check_left_to_play(u, n_bytes);
424
425 if (u->use_tsched)
426
427 /* We won't fill up the playback buffer before at least
428 * half the sleep time is over because otherwise we might
429 * ask for more data from the clients than they expect. We
430 * need to guarantee that clients only have to keep around
431 * a single hw buffer length. */
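/* As an illustration (assumed numbers): with a sleep time of 80ms and a
 * process time of 20ms we only start refilling once less than
 * 20ms + 80ms/2 = 60ms of audio is left queued in the hardware buffer. */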
432
433 if (!polled &&
434 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
435 #ifdef DEBUG_TIMING
436 pa_log_debug("Not filling up, because too early.");
437 #endif
438 break;
439 }
440
441 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
442
443 if (polled)
444 PA_ONCE_BEGIN {
445 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
446 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
447 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
448 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
449 pa_strnull(dn));
450 pa_xfree(dn);
451 } PA_ONCE_END;
452
453 #ifdef DEBUG_TIMING
454 pa_log_debug("Not filling up, because not necessary.");
455 #endif
456 break;
457 }
458
459
460 if (++j > 10) {
461 #ifdef DEBUG_TIMING
462 pa_log_debug("Not filling up, because already too many iterations.");
463 #endif
464
465 break;
466 }
467
468 n_bytes -= u->hwbuf_unused;
469 polled = FALSE;
470
471 #ifdef DEBUG_TIMING
472 pa_log_debug("Filling up");
473 #endif
474
475 for (;;) {
476 pa_memchunk chunk;
477 void *p;
478 int err;
479 const snd_pcm_channel_area_t *areas;
480 snd_pcm_uframes_t offset, frames;
481 snd_pcm_sframes_t sframes;
482
483 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
484 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
485
486 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
487
488 if (!after_avail && err == -EAGAIN)
489 break;
490
491 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
492 continue;
493
494 return r;
495 }
496
497 /* Make sure that if these memblocks need to be copied they will fit into one slot */
498 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
499 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
500
501 if (!after_avail && frames == 0)
502 break;
503
504 pa_assert(frames > 0);
505 after_avail = FALSE;
506
507 /* Check these are multiples of 8 bit */
508 pa_assert((areas[0].first & 7) == 0);
509 pa_assert((areas[0].step & 7) == 0);
510
511 /* We assume a single interleaved memory buffer */
512 pa_assert((areas[0].first >> 3) == 0);
513 pa_assert((areas[0].step >> 3) == u->frame_size);
514
515 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
516
517 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
518 chunk.length = pa_memblock_get_length(chunk.memblock);
519 chunk.index = 0;
520
521 pa_sink_render_into_full(u->sink, &chunk);
522 pa_memblock_unref_fixed(chunk.memblock);
523
524 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
525
526 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
527 continue;
528
529 return r;
530 }
531
532 work_done = TRUE;
533
534 u->write_count += frames * u->frame_size;
535 u->since_start += frames * u->frame_size;
536
537 #ifdef DEBUG_TIMING
538 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
539 #endif
540
541 if ((size_t) frames * u->frame_size >= n_bytes)
542 break;
543
544 n_bytes -= (size_t) frames * u->frame_size;
545 }
546 }
547
548 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
549
550 if (*sleep_usec > process_usec)
551 *sleep_usec -= process_usec;
552 else
553 *sleep_usec = 0;
554
555 return work_done ? 1 : 0;
556 }
557
558 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
559 pa_bool_t work_done = FALSE;
560 pa_usec_t max_sleep_usec = 0, process_usec = 0;
561 size_t left_to_play;
562 unsigned j = 0;
563
564 pa_assert(u);
565 pa_sink_assert_ref(u->sink);
566
567 if (u->use_tsched)
568 hw_sleep_time(u, &max_sleep_usec, &process_usec);
569
570 for (;;) {
571 snd_pcm_sframes_t n;
572 size_t n_bytes;
573 int r;
574
575 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
576
577 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
578 continue;
579
580 return r;
581 }
582
583 n_bytes = (size_t) n * u->frame_size;
584 left_to_play = check_left_to_play(u, n_bytes);
585
586 if (u->use_tsched)
587
588 /* We won't fill up the playback buffer before at least
589 * half the sleep time is over because otherwise we might
590 * ask for more data from the clients than they expect. We
591 * need to guarantee that clients only have to keep around
592 * a single hw buffer length. */
593
594 if (!polled &&
595 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
596 break;
597
598 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
599
600 if (polled)
601 PA_ONCE_BEGIN {
602 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
603 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
604 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
605 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
606 pa_strnull(dn));
607 pa_xfree(dn);
608 } PA_ONCE_END;
609
610 break;
611 }
612
613 if (++j > 10) {
614 #ifdef DEBUG_TIMING
615 pa_log_debug("Not filling up, because already too many iterations.");
616 #endif
617
618 break;
619 }
620
621 n_bytes -= u->hwbuf_unused;
622 polled = FALSE;
623
624 for (;;) {
625 snd_pcm_sframes_t frames;
626 void *p;
627 pa_bool_t after_avail = TRUE;
628
629 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
630
631 if (u->memchunk.length <= 0)
632 pa_sink_render(u->sink, n_bytes, &u->memchunk);
633
634 pa_assert(u->memchunk.length > 0);
635
636 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
637
638 if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
639 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
640
641 p = pa_memblock_acquire(u->memchunk.memblock);
642 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
643 pa_memblock_release(u->memchunk.memblock);
644
645 if (PA_UNLIKELY(frames < 0)) {
646
647 if (!after_avail && (int) frames == -EAGAIN)
648 break;
649
650 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
651 continue;
652
653 return r;
654 }
655
656 if (!after_avail && frames == 0)
657 break;
658
659 pa_assert(frames > 0);
660 after_avail = FALSE;
661
662 u->memchunk.index += (size_t) frames * u->frame_size;
663 u->memchunk.length -= (size_t) frames * u->frame_size;
664
665 if (u->memchunk.length <= 0) {
666 pa_memblock_unref(u->memchunk.memblock);
667 pa_memchunk_reset(&u->memchunk);
668 }
669
670 work_done = TRUE;
671
672 u->write_count += frames * u->frame_size;
673 u->since_start += frames * u->frame_size;
674
675 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
676
677 if ((size_t) frames * u->frame_size >= n_bytes)
678 break;
679
680 n_bytes -= (size_t) frames * u->frame_size;
681 }
682 }
683
684 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
685
686 if (*sleep_usec > process_usec)
687 *sleep_usec -= process_usec;
688 else
689 *sleep_usec = 0;
690
691 return work_done ? 1 : 0;
692 }
693
694 static void update_smoother(struct userdata *u) {
695 snd_pcm_sframes_t delay = 0;
696 int64_t position;
697 int err;
698 pa_usec_t now1 = 0, now2;
699 snd_pcm_status_t *status;
700
701 snd_pcm_status_alloca(&status);
702
703 pa_assert(u);
704 pa_assert(u->pcm_handle);
705
706 /* Let's update the time smoother */
707
708 if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
709 pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
710 return;
711 }
712
713 if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
714 pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
715 else {
716 snd_htimestamp_t htstamp = { 0, 0 };
717 snd_pcm_status_get_htstamp(status, &htstamp);
718 now1 = pa_timespec_load(&htstamp);
719 }
720
721 position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
722
723 if (PA_UNLIKELY(position < 0))
724 position = 0;
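/* Example (assumed numbers): if 1000000 bytes have been written so far and
 * ALSA reports a delay of 8820 frames at 4 bytes per frame (35280 bytes),
 * the playback position is 964720 bytes; converted to usec below, it becomes
 * the sound card time that is fed into the smoother. */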
725
726 /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
727 if (now1 <= 0)
728 now1 = pa_rtclock_now();
729
730 now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
731
732 pa_smoother_put(u->smoother, now1, now2);
733 }
734
735 static pa_usec_t sink_get_latency(struct userdata *u) {
736 pa_usec_t r;
737 int64_t delay;
738 pa_usec_t now1, now2;
739
740 pa_assert(u);
741
742 now1 = pa_rtclock_now();
743 now2 = pa_smoother_get(u->smoother, now1);
744
745 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
746
747 r = delay >= 0 ? (pa_usec_t) delay : 0;
748
749 if (u->memchunk.memblock)
750 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
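/* Example (assumed numbers): if 500ms worth of audio has been handed to
 * ALSA but the smoother estimates that only 460ms has been played, the
 * reported latency is 40ms, plus whatever is still pending in u->memchunk
 * in the non-mmap case. */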
751
752 return r;
753 }
754
755 static int build_pollfd(struct userdata *u) {
756 pa_assert(u);
757 pa_assert(u->pcm_handle);
758
759 if (u->alsa_rtpoll_item)
760 pa_rtpoll_item_free(u->alsa_rtpoll_item);
761
762 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
763 return -1;
764
765 return 0;
766 }
767
768 /* Called from IO context */
769 static int suspend(struct userdata *u) {
770 pa_assert(u);
771 pa_assert(u->pcm_handle);
772
773 pa_smoother_pause(u->smoother, pa_rtclock_now());
774
775 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
776 * take awfully long with our long buffer sizes today. */
777 snd_pcm_close(u->pcm_handle);
778 u->pcm_handle = NULL;
779
780 if (u->alsa_rtpoll_item) {
781 pa_rtpoll_item_free(u->alsa_rtpoll_item);
782 u->alsa_rtpoll_item = NULL;
783 }
784
785 pa_log_info("Device suspended...");
786
787 return 0;
788 }
789
790 /* Called from IO context */
791 static int update_sw_params(struct userdata *u) {
792 snd_pcm_uframes_t avail_min;
793 int err;
794
795 pa_assert(u);
796
797 /* Use the full buffer if no one asked us for anything specific */
798 u->hwbuf_unused = 0;
799
800 if (u->use_tsched) {
801 pa_usec_t latency;
802
803 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
804 size_t b;
805
806 pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
807
808 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
809
810 /* We need at least one sample in our buffer */
811
812 if (PA_UNLIKELY(b < u->frame_size))
813 b = u->frame_size;
814
815 u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
816 }
817
818 fix_min_sleep_wakeup(u);
819 fix_tsched_watermark(u);
820 }
821
822 pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
823
824 /* We need at least one frame in the used part of the buffer */
825 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
826
827 if (u->use_tsched) {
828 pa_usec_t sleep_usec, process_usec;
829
830 hw_sleep_time(u, &sleep_usec, &process_usec);
831 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
832 }
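/* Example (assumed numbers): with hwbuf_unused == 0 this starts at a single
 * frame; with timer-based scheduling and a computed sleep time of 80ms at
 * 44.1 kHz roughly another 3528 frames are added, so ALSA only signals
 * POLLOUT shortly before we would want to wake up anyway. */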
833
834 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
835
836 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
837 pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
838 return err;
839 }
840
841 pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
842
843 return 0;
844 }
845
846 /* Called from IO context */
847 static int unsuspend(struct userdata *u) {
848 pa_sample_spec ss;
849 int err;
850 pa_bool_t b, d;
851 unsigned nfrags;
852 snd_pcm_uframes_t period_size;
853
854 pa_assert(u);
855 pa_assert(!u->pcm_handle);
856
857 pa_log_info("Trying resume...");
858
859 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
860 /*SND_PCM_NONBLOCK|*/
861 SND_PCM_NO_AUTO_RESAMPLE|
862 SND_PCM_NO_AUTO_CHANNELS|
863 SND_PCM_NO_AUTO_FORMAT)) < 0) {
864 pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
865 goto fail;
866 }
867
868 ss = u->sink->sample_spec;
869 nfrags = u->nfragments;
870 period_size = u->fragment_size / u->frame_size;
871 b = u->use_mmap;
872 d = u->use_tsched;
873
874 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
875 pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
876 goto fail;
877 }
878
879 if (b != u->use_mmap || d != u->use_tsched) {
880 pa_log_warn("Resume failed, couldn't get original access mode.");
881 goto fail;
882 }
883
884 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
885 pa_log_warn("Resume failed, couldn't restore original sample settings.");
886 goto fail;
887 }
888
889 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
890 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
891 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
892 (unsigned long) nfrags, period_size * u->frame_size);
893 goto fail;
894 }
895
896 if (update_sw_params(u) < 0)
897 goto fail;
898
899 if (build_pollfd(u) < 0)
900 goto fail;
901
902 u->first = TRUE;
903 u->since_start = 0;
904
905 pa_log_info("Resumed successfully...");
906
907 return 0;
908
909 fail:
910 if (u->pcm_handle) {
911 snd_pcm_close(u->pcm_handle);
912 u->pcm_handle = NULL;
913 }
914
915 return -1;
916 }
917
918 /* Called from IO context */
919 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
920 struct userdata *u = PA_SINK(o)->userdata;
921
922 switch (code) {
923
924 case PA_SINK_MESSAGE_GET_LATENCY: {
925 pa_usec_t r = 0;
926
927 if (u->pcm_handle)
928 r = sink_get_latency(u);
929
930 *((pa_usec_t*) data) = r;
931
932 return 0;
933 }
934
935 case PA_SINK_MESSAGE_SET_STATE:
936
937 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
938
939 case PA_SINK_SUSPENDED:
940 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
941
942 if (suspend(u) < 0)
943 return -1;
944
945 break;
946
947 case PA_SINK_IDLE:
948 case PA_SINK_RUNNING:
949
950 if (u->sink->thread_info.state == PA_SINK_INIT) {
951 if (build_pollfd(u) < 0)
952 return -1;
953 }
954
955 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
956 if (unsuspend(u) < 0)
957 return -1;
958 }
959
960 break;
961
962 case PA_SINK_UNLINKED:
963 case PA_SINK_INIT:
964 case PA_SINK_INVALID_STATE:
965 ;
966 }
967
968 break;
969 }
970
971 return pa_sink_process_msg(o, code, data, offset, chunk);
972 }
973
974 /* Called from main context */
975 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
976 pa_sink_state_t old_state;
977 struct userdata *u;
978
979 pa_sink_assert_ref(s);
980 pa_assert_se(u = s->userdata);
981
982 old_state = pa_sink_get_state(u->sink);
983
984 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
985 reserve_done(u);
986 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
987 if (reserve_init(u, u->device_name) < 0)
988 return -1;
989
990 return 0;
991 }
992
993 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
994 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
995
996 pa_assert(u);
997 pa_assert(u->mixer_handle);
998
999 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1000 return 0;
1001
1002 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1003 pa_sink_get_volume(u->sink, TRUE, FALSE);
1004 pa_sink_get_mute(u->sink, TRUE);
1005 }
1006
1007 return 0;
1008 }
1009
1010 static void sink_get_volume_cb(pa_sink *s) {
1011 struct userdata *u = s->userdata;
1012 pa_cvolume r;
1013 char t[PA_CVOLUME_SNPRINT_MAX];
1014
1015 pa_assert(u);
1016 pa_assert(u->mixer_path);
1017 pa_assert(u->mixer_handle);
1018
1019 if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1020 return;
1021
1022 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1023 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1024
1025 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1026
1027 if (pa_cvolume_equal(&u->hardware_volume, &r))
1028 return;
1029
1030 s->virtual_volume = u->hardware_volume = r;
1031
1032 if (u->mixer_path->has_dB) {
1033 pa_cvolume reset;
1034
1035 /* Hmm, so the hardware volume changed, let's reset our software volume */
1036 pa_cvolume_reset(&reset, s->sample_spec.channels);
1037 pa_sink_set_soft_volume(s, &reset);
1038 }
1039 }
1040
1041 static void sink_set_volume_cb(pa_sink *s) {
1042 struct userdata *u = s->userdata;
1043 pa_cvolume r;
1044 char t[PA_CVOLUME_SNPRINT_MAX];
1045
1046 pa_assert(u);
1047 pa_assert(u->mixer_path);
1048 pa_assert(u->mixer_handle);
1049
1050 /* Shift up by the base volume */
1051 pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);
1052
1053 if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1054 return;
1055
1056 /* Shift down by the base volume, so that 0dB becomes maximum volume */
1057 pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1058
1059 u->hardware_volume = r;
1060
1061 if (u->mixer_path->has_dB) {
1062
1063 /* Match exactly what the user requested by software */
1064 pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);
1065
1066 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
1067 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
1068 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));
1069
1070 } else {
1071 pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
1072
1073 /* We can't match exactly what the user requested, hence let's
1074 * at least tell the user about it */
1075
1076 s->virtual_volume = r;
1077 }
1078 }
1079
1080 static void sink_get_mute_cb(pa_sink *s) {
1081 struct userdata *u = s->userdata;
1082 pa_bool_t b;
1083
1084 pa_assert(u);
1085 pa_assert(u->mixer_path);
1086 pa_assert(u->mixer_handle);
1087
1088 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1089 return;
1090
1091 s->muted = b;
1092 }
1093
1094 static void sink_set_mute_cb(pa_sink *s) {
1095 struct userdata *u = s->userdata;
1096
1097 pa_assert(u);
1098 pa_assert(u->mixer_path);
1099 pa_assert(u->mixer_handle);
1100
1101 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1102 }
1103
1104 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1105 struct userdata *u = s->userdata;
1106 pa_alsa_port_data *data;
1107
1108 pa_assert(u);
1109 pa_assert(p);
1110 pa_assert(u->mixer_handle);
1111
1112 data = PA_DEVICE_PORT_DATA(p);
1113
1114 pa_assert_se(u->mixer_path = data->path);
1115 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1116
1117 if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1118 s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1119 s->n_volume_steps = PA_VOLUME_NORM+1;
1120
1121 if (u->mixer_path->max_dB > 0.0)
1122 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1123 else
1124 pa_log_info("No particular base volume set, fixing to 0 dB");
1125 } else {
1126 s->base_volume = PA_VOLUME_NORM;
1127 s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1128 }
1129
1130 if (data->setting)
1131 pa_alsa_setting_select(data->setting, u->mixer_handle);
1132
1133 if (s->set_mute)
1134 s->set_mute(s);
1135 if (s->set_volume)
1136 s->set_volume(s);
1137
1138 return 0;
1139 }
1140
1141 static void sink_update_requested_latency_cb(pa_sink *s) {
1142 struct userdata *u = s->userdata;
1143 size_t before;
1144 pa_assert(u);
1145
1146 if (!u->pcm_handle)
1147 return;
1148
1149 before = u->hwbuf_unused;
1150 update_sw_params(u);
1151
1152 /* Let's check whether we now use only a smaller part of the
1153 buffer than before. If so, we need to make sure that subsequent
1154 rewinds are relative to the new maximum fill level and not to the
1155 current fill level. Thus, let's do a full rewind once, to clear
1156 things up. */
1157
1158 if (u->hwbuf_unused > before) {
1159 pa_log_debug("Requesting rewind due to latency change.");
1160 pa_sink_request_rewind(s, (size_t) -1);
1161 }
1162 }
1163
1164 static int process_rewind(struct userdata *u) {
1165 snd_pcm_sframes_t unused;
1166 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1167 pa_assert(u);
1168
1169 /* Figure out how much we shall rewind and reset the counter */
1170 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1171
1172 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1173
1174 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1175 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1176 return -1;
1177 }
1178
1179 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1180
1181 if (u->hwbuf_size > unused_nbytes)
1182 limit_nbytes = u->hwbuf_size - unused_nbytes;
1183 else
1184 limit_nbytes = 0;
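/* Example (assumed numbers): with a 352800 byte buffer, a 35280 byte
 * watermark and 70560 bytes reported free, at most
 * 352800 - (35280 + 70560) = 246960 bytes may be rewound. */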
1185
1186 if (rewind_nbytes > limit_nbytes)
1187 rewind_nbytes = limit_nbytes;
1188
1189 if (rewind_nbytes > 0) {
1190 snd_pcm_sframes_t in_frames, out_frames;
1191
1192 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1193
1194 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1195 pa_log_debug("before: %lu", (unsigned long) in_frames);
1196 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1197 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1198 return -1;
1199 }
1200 pa_log_debug("after: %lu", (unsigned long) out_frames);
1201
1202 rewind_nbytes = (size_t) out_frames * u->frame_size;
1203
1204 if (rewind_nbytes <= 0)
1205 pa_log_info("Tried rewind, but was apparently not possible.");
1206 else {
1207 u->write_count -= out_frames * u->frame_size;
1208 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1209 pa_sink_process_rewind(u->sink, rewind_nbytes);
1210
1211 u->after_rewind = TRUE;
1212 return 0;
1213 }
1214 } else
1215 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1216
1217 pa_sink_process_rewind(u->sink, 0);
1218 return 0;
1219 }
1220
1221 static void thread_func(void *userdata) {
1222 struct userdata *u = userdata;
1223 unsigned short revents = 0;
1224
1225 pa_assert(u);
1226
1227 pa_log_debug("Thread starting up");
1228
1229 if (u->core->realtime_scheduling)
1230 pa_make_realtime(u->core->realtime_priority);
1231
1232 pa_thread_mq_install(&u->thread_mq);
1233
1234 for (;;) {
1235 int ret;
1236
1237 #ifdef DEBUG_TIMING
1238 pa_log_debug("Loop");
1239 #endif
1240
1241 /* Render some data and write it to the dsp */
1242 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1243 int work_done;
1244 pa_usec_t sleep_usec = 0;
1245
1246 if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
1247 if (process_rewind(u) < 0)
1248 goto fail;
1249
1250 if (u->use_mmap)
1251 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
1252 else
1253 work_done = unix_write(u, &sleep_usec, revents & POLLOUT);
1254
1255 if (work_done < 0)
1256 goto fail;
1257
1258 /* pa_log_debug("work_done = %i", work_done); */
1259
1260 if (work_done) {
1261
1262 if (u->first) {
1263 pa_log_info("Starting playback.");
1264 snd_pcm_start(u->pcm_handle);
1265
1266 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1267 }
1268
1269 update_smoother(u);
1270 }
1271
1272 if (u->use_tsched) {
1273 pa_usec_t cusec;
1274
1275 if (u->since_start <= u->hwbuf_size) {
1276
1277 /* USB devices on ALSA seem to hit a buffer
1278 * underrun during the first iterations much
1279 * quicker than we calculate here, probably due to
1280 * the transport latency. To compensate for that
1281 * we artificially decrease the sleep time until
1282 * we have filled the buffer at least once
1283 * completely.*/
1284
1285 pa_log_debug("Cutting sleep time for the initial iterations by half.");
1286 sleep_usec /= 2;
1287 }
1288
1289 /* OK, the playback buffer is now full, let's
1290 * calculate when to wake up next */
1291 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1292
1293 /* Convert from the sound card time domain to the
1294 * system time domain */
1295 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1296
1297 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1298
1299 /* We don't trust the conversion, so we wake up on whichever comes first */
1300 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
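/* Example (assumed numbers): if the sound card clock says "wake up in 80ms"
 * but the smoother maps that to 78ms of system time, we arm the timer with
 * the smaller of the two values. */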
1301 }
1302
1303 u->first = FALSE;
1304 u->after_rewind = FALSE;
1305
1306 } else if (u->use_tsched)
1307
1308 /* OK, we're in an invalid state, let's disable our timers */
1309 pa_rtpoll_set_timer_disabled(u->rtpoll);
1310
1311 /* Hmm, nothing to do. Let's sleep */
1312 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1313 goto fail;
1314
1315 if (ret == 0)
1316 goto finish;
1317
1318 /* Tell ALSA about this and process its response */
1319 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1320 struct pollfd *pollfd;
1321 int err;
1322 unsigned n;
1323
1324 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1325
1326 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1327 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1328 goto fail;
1329 }
1330
1331 if (revents & ~POLLOUT) {
1332 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1333 goto fail;
1334
1335 u->first = TRUE;
1336 u->since_start = 0;
1337 } else if (revents && u->use_tsched && pa_log_ratelimit())
1338 pa_log_debug("Wakeup from ALSA!");
1339
1340 } else
1341 revents = 0;
1342 }
1343
1344 fail:
1345 /* If this was not a regular exit from the loop we have to continue
1346 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1347 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1348 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1349
1350 finish:
1351 pa_log_debug("Thread shutting down");
1352 }
1353
1354 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1355 const char *n;
1356 char *t;
1357
1358 pa_assert(data);
1359 pa_assert(ma);
1360 pa_assert(device_name);
1361
1362 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1363 pa_sink_new_data_set_name(data, n);
1364 data->namereg_fail = TRUE;
1365 return;
1366 }
1367
1368 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1369 data->namereg_fail = TRUE;
1370 else {
1371 n = device_id ? device_id : device_name;
1372 data->namereg_fail = FALSE;
1373 }
1374
1375 if (mapping)
1376 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1377 else
1378 t = pa_sprintf_malloc("alsa_output.%s", n);
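/* For illustration (assumed values): device_id "0" together with a mapping
 * named "analog-stereo" yields the sink name "alsa_output.0.analog-stereo",
 * while a plain device string "front:1" without a mapping yields
 * "alsa_output.front:1". */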
1379
1380 pa_sink_new_data_set_name(data, t);
1381 pa_xfree(t);
1382 }
1383
1384 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1385
1386 if (!mapping && !element)
1387 return;
1388
1389 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1390 pa_log_info("Failed to find a working mixer device.");
1391 return;
1392 }
1393
1394 if (element) {
1395
1396 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1397 goto fail;
1398
1399 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1400 goto fail;
1401
1402 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1403 pa_alsa_path_dump(u->mixer_path);
1404 } else {
1405
1406 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1407 goto fail;
1408
1409 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1410
1411 pa_log_debug("Probed mixer paths:");
1412 pa_alsa_path_set_dump(u->mixer_path_set);
1413 }
1414
1415 return;
1416
1417 fail:
1418
1419 if (u->mixer_path_set) {
1420 pa_alsa_path_set_free(u->mixer_path_set);
1421 u->mixer_path_set = NULL;
1422 } else if (u->mixer_path) {
1423 pa_alsa_path_free(u->mixer_path);
1424 u->mixer_path = NULL;
1425 }
1426
1427 if (u->mixer_handle) {
1428 snd_mixer_close(u->mixer_handle);
1429 u->mixer_handle = NULL;
1430 }
1431 }
1432
1433 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1434 pa_assert(u);
1435
1436 if (!u->mixer_handle)
1437 return 0;
1438
1439 if (u->sink->active_port) {
1440 pa_alsa_port_data *data;
1441
1442 /* We have a list of supported paths, so let's activate the
1443 * one that has been chosen as active */
1444
1445 data = PA_DEVICE_PORT_DATA(u->sink->active_port);
1446 u->mixer_path = data->path;
1447
1448 pa_alsa_path_select(data->path, u->mixer_handle);
1449
1450 if (data->setting)
1451 pa_alsa_setting_select(data->setting, u->mixer_handle);
1452
1453 } else {
1454
1455 if (!u->mixer_path && u->mixer_path_set)
1456 u->mixer_path = u->mixer_path_set->paths;
1457
1458 if (u->mixer_path) {
1459 /* Hmm, we have only a single path, so let's activate it */
1460
1461 pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1462
1463 if (u->mixer_path->settings)
1464 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1465 } else
1466 return 0;
1467 }
1468
1469 if (!u->mixer_path->has_volume)
1470 pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1471 else {
1472
1473 if (u->mixer_path->has_dB) {
1474 pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1475
1476 u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1477 u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1478
1479 if (u->mixer_path->max_dB > 0.0)
1480 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1481 else
1482 pa_log_info("No particular base volume set, fixing to 0 dB");
1483
1484 } else {
1485 pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1486 u->sink->base_volume = PA_VOLUME_NORM;
1487 u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1488 }
1489
1490 u->sink->get_volume = sink_get_volume_cb;
1491 u->sink->set_volume = sink_set_volume_cb;
1492
1493 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
1494 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1495 }
1496
1497 if (!u->mixer_path->has_mute) {
1498 pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1499 } else {
1500 u->sink->get_mute = sink_get_mute_cb;
1501 u->sink->set_mute = sink_set_mute_cb;
1502 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1503 pa_log_info("Using hardware mute control.");
1504 }
1505
1506 u->mixer_fdl = pa_alsa_fdlist_new();
1507
1508 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1509 pa_log("Failed to initialize file descriptor monitoring");
1510 return -1;
1511 }
1512
1513 if (u->mixer_path_set)
1514 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1515 else
1516 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1517
1518 return 0;
1519 }
1520
1521 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1522
1523 struct userdata *u = NULL;
1524 const char *dev_id = NULL;
1525 pa_sample_spec ss, requested_ss;
1526 pa_channel_map map;
1527 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1528 snd_pcm_uframes_t period_frames, tsched_frames;
1529 size_t frame_size;
1530 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1531 pa_sink_new_data data;
1532 pa_alsa_profile_set *profile_set = NULL;
1533
1534 pa_assert(m);
1535 pa_assert(ma);
1536
1537 ss = m->core->default_sample_spec;
1538 map = m->core->default_channel_map;
1539 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1540 pa_log("Failed to parse sample specification and channel map");
1541 goto fail;
1542 }
1543
1544 requested_ss = ss;
1545 frame_size = pa_frame_size(&ss);
1546
1547 nfrags = m->core->default_n_fragments;
1548 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1549 if (frag_size <= 0)
1550 frag_size = (uint32_t) frame_size;
1551 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1552 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1553
1554 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1555 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1556 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1557 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1558 pa_log("Failed to parse buffer metrics");
1559 goto fail;
1560 }
1561
1562 hwbuf_size = frag_size * nfrags;
1563 period_frames = frag_size/frame_size;
1564 tsched_frames = tsched_size/frame_size;
1565
1566 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1567 pa_log("Failed to parse mmap argument.");
1568 goto fail;
1569 }
1570
1571 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1572 pa_log("Failed to parse tsched argument.");
1573 goto fail;
1574 }
1575
1576 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1577 pa_log("Failed to parse ignore_dB argument.");
1578 goto fail;
1579 }
1580
1581 if (use_tsched && !pa_rtclock_hrtimer()) {
1582 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1583 use_tsched = FALSE;
1584 }
1585
1586 u = pa_xnew0(struct userdata, 1);
1587 u->core = m->core;
1588 u->module = m;
1589 u->use_mmap = use_mmap;
1590 u->use_tsched = use_tsched;
1591 u->first = TRUE;
1592 u->rtpoll = pa_rtpoll_new();
1593 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1594
1595 u->smoother = pa_smoother_new(
1596 DEFAULT_TSCHED_BUFFER_USEC*2,
1597 DEFAULT_TSCHED_BUFFER_USEC*2,
1598 TRUE,
1599 TRUE,
1600 5,
1601 pa_rtclock_now(),
1602 TRUE);
1603
1604 dev_id = pa_modargs_get_value(
1605 ma, "device_id",
1606 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1607
1608 if (reserve_init(u, dev_id) < 0)
1609 goto fail;
1610
1611 if (reserve_monitor_init(u, dev_id) < 0)
1612 goto fail;
1613
1614 b = use_mmap;
1615 d = use_tsched;
1616
1617 if (mapping) {
1618
1619 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1620 pa_log("device_id= not set");
1621 goto fail;
1622 }
1623
1624 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1625 dev_id,
1626 &u->device_name,
1627 &ss, &map,
1628 SND_PCM_STREAM_PLAYBACK,
1629 &nfrags, &period_frames, tsched_frames,
1630 &b, &d, mapping)))
1631
1632 goto fail;
1633
1634 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1635
1636 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1637 goto fail;
1638
1639 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1640 dev_id,
1641 &u->device_name,
1642 &ss, &map,
1643 SND_PCM_STREAM_PLAYBACK,
1644 &nfrags, &period_frames, tsched_frames,
1645 &b, &d, profile_set, &mapping)))
1646
1647 goto fail;
1648
1649 } else {
1650
1651 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1652 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1653 &u->device_name,
1654 &ss, &map,
1655 SND_PCM_STREAM_PLAYBACK,
1656 &nfrags, &period_frames, tsched_frames,
1657 &b, &d, FALSE)))
1658 goto fail;
1659 }
1660
1661 pa_assert(u->device_name);
1662 pa_log_info("Successfully opened device %s.", u->device_name);
1663
1664 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1665 pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1666 goto fail;
1667 }
1668
1669 if (mapping)
1670 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1671
1672 if (use_mmap && !b) {
1673 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1674 u->use_mmap = use_mmap = FALSE;
1675 }
1676
1677 if (use_tsched && (!b || !d)) {
1678 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1679 u->use_tsched = use_tsched = FALSE;
1680 }
1681
1682 if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
1683 pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
1684 u->use_tsched = use_tsched = FALSE;
1685 }
1686
1687 if (u->use_mmap)
1688 pa_log_info("Successfully enabled mmap() mode.");
1689
1690 if (u->use_tsched)
1691 pa_log_info("Successfully enabled timer-based scheduling mode.");
1692
1693 /* ALSA might tweak the sample spec, so recalculate the frame size */
1694 frame_size = pa_frame_size(&ss);
1695
1696 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1697
1698 pa_sink_new_data_init(&data);
1699 data.driver = driver;
1700 data.module = m;
1701 data.card = card;
1702 set_sink_name(&data, ma, dev_id, u->device_name, mapping);
1703 pa_sink_new_data_set_sample_spec(&data, &ss);
1704 pa_sink_new_data_set_channel_map(&data, &map);
1705
1706 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1707 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1708 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1709 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1710 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1711
1712 if (mapping) {
1713 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1714 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1715 }
1716
1717 pa_alsa_init_description(data.proplist);
1718
1719 if (u->control_device)
1720 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1721
1722 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1723 pa_log("Invalid properties");
1724 pa_sink_new_data_done(&data);
1725 goto fail;
1726 }
1727
1728 if (u->mixer_path_set)
1729 pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1730
1731 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
1732 pa_sink_new_data_done(&data);
1733
1734 if (!u->sink) {
1735 pa_log("Failed to create sink object");
1736 goto fail;
1737 }
1738
1739 u->sink->parent.process_msg = sink_process_msg;
1740 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1741 u->sink->set_state = sink_set_state_cb;
1742 u->sink->set_port = sink_set_port_cb;
1743 u->sink->userdata = u;
1744
1745 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1746 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1747
1748 u->frame_size = frame_size;
1749 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1750 u->nfragments = nfrags;
1751 u->hwbuf_size = u->fragment_size * nfrags;
1752 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1753 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1754
1755 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1756 nfrags, (long unsigned) u->fragment_size,
1757 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1758
1759 pa_sink_set_max_request(u->sink, u->hwbuf_size);
1760 pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
1761
1762 if (u->use_tsched) {
1763 u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);
1764
1765 fix_min_sleep_wakeup(u);
1766 fix_tsched_watermark(u);
1767
1768 pa_sink_set_latency_range(u->sink,
1769 0,
1770 pa_bytes_to_usec(u->hwbuf_size, &ss));
1771
1772 pa_log_info("Time scheduling watermark is %0.2fms",
1773 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1774 } else
1775 pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
1776
1777 reserve_update(u);
1778
1779 if (update_sw_params(u) < 0)
1780 goto fail;
1781
1782 if (setup_mixer(u, ignore_dB) < 0)
1783 goto fail;
1784
1785 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1786
1787 if (!(u->thread = pa_thread_new(thread_func, u))) {
1788 pa_log("Failed to create thread.");
1789 goto fail;
1790 }
1791
1792 /* Get initial mixer settings */
1793 if (data.volume_is_set) {
1794 if (u->sink->set_volume)
1795 u->sink->set_volume(u->sink);
1796 } else {
1797 if (u->sink->get_volume)
1798 u->sink->get_volume(u->sink);
1799 }
1800
1801 if (data.muted_is_set) {
1802 if (u->sink->set_mute)
1803 u->sink->set_mute(u->sink);
1804 } else {
1805 if (u->sink->get_mute)
1806 u->sink->get_mute(u->sink);
1807 }
1808
1809 pa_sink_put(u->sink);
1810
1811 if (profile_set)
1812 pa_alsa_profile_set_free(profile_set);
1813
1814 return u->sink;
1815
1816 fail:
1817
1818 if (u)
1819 userdata_free(u);
1820
1821 if (profile_set)
1822 pa_alsa_profile_set_free(profile_set);
1823
1824 return NULL;
1825 }
1826
1827 static void userdata_free(struct userdata *u) {
1828 pa_assert(u);
1829
1830 if (u->sink)
1831 pa_sink_unlink(u->sink);
1832
1833 if (u->thread) {
1834 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1835 pa_thread_free(u->thread);
1836 }
1837
1838 pa_thread_mq_done(&u->thread_mq);
1839
1840 if (u->sink)
1841 pa_sink_unref(u->sink);
1842
1843 if (u->memchunk.memblock)
1844 pa_memblock_unref(u->memchunk.memblock);
1845
1846 if (u->alsa_rtpoll_item)
1847 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1848
1849 if (u->rtpoll)
1850 pa_rtpoll_free(u->rtpoll);
1851
1852 if (u->pcm_handle) {
1853 snd_pcm_drop(u->pcm_handle);
1854 snd_pcm_close(u->pcm_handle);
1855 }
1856
1857 if (u->mixer_fdl)
1858 pa_alsa_fdlist_free(u->mixer_fdl);
1859
1860 if (u->mixer_path_set)
1861 pa_alsa_path_set_free(u->mixer_path_set);
1862 else if (u->mixer_path)
1863 pa_alsa_path_free(u->mixer_path);
1864
1865 if (u->mixer_handle)
1866 snd_mixer_close(u->mixer_handle);
1867
1868 if (u->smoother)
1869 pa_smoother_free(u->smoother);
1870
1871 reserve_done(u);
1872 monitor_done(u);
1873
1874 pa_xfree(u->device_name);
1875 pa_xfree(u->control_device);
1876 pa_xfree(u);
1877 }
1878
1879 void pa_alsa_sink_free(pa_sink *s) {
1880 struct userdata *u;
1881
1882 pa_sink_assert_ref(s);
1883 pa_assert_se(u = s->userdata);
1884
1885 userdata_free(u);
1886 }