/* PulseAudio — src/modules/alsa/alsa-sink.c
 * Snapshot at commit: "alsa: throw timing data away after device resume" */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;                /* IO thread */
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;            /* NULL while suspended (see suspend()/unsuspend()) */

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;         /* currently selected mixer path */

    pa_cvolume hardware_volume;       /* last volume read from/written to the hw mixer */

    size_t
        frame_size,                   /* bytes per audio frame */
        fragment_size,                /* bytes per period */
        hwbuf_size,                   /* total hardware buffer size in bytes */
        tsched_watermark,             /* refill when this many bytes are left to play */
        hwbuf_unused,                 /* tail of the hw buffer we deliberately keep empty */
        min_sleep,                    /* lower bound for sleep margin, in bytes */
        min_wakeup,                   /* lower bound for wakeup margin, in bytes */
        watermark_step;               /* increment applied on underrun */

    unsigned nfragments;
    pa_memchunk memchunk;             /* partially written chunk (unix_write() path) */

    char *device_name;                /* name of the PCM device */
    char *control_device;             /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first, after_rewind;    /* suppress underrun handling right after start/rewind */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;            /* interpolates playback time vs. system time */
    uint64_t write_count;             /* bytes written since open/resume */
    uint64_t since_start;             /* bytes written since last start/recovery */

    pa_reserve_wrapper *reserve;      /* device reservation */
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
122
123 static void userdata_free(struct userdata *u);
124
125 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
126 pa_assert(r);
127 pa_assert(u);
128
129 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
130 return PA_HOOK_CANCEL;
131
132 return PA_HOOK_OK;
133 }
134
135 static void reserve_done(struct userdata *u) {
136 pa_assert(u);
137
138 if (u->reserve_slot) {
139 pa_hook_slot_free(u->reserve_slot);
140 u->reserve_slot = NULL;
141 }
142
143 if (u->reserve) {
144 pa_reserve_wrapper_unref(u->reserve);
145 u->reserve = NULL;
146 }
147 }
148
149 static void reserve_update(struct userdata *u) {
150 const char *description;
151 pa_assert(u);
152
153 if (!u->sink || !u->reserve)
154 return;
155
156 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
157 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
158 }
159
/* Acquire the device reservation for dname. Returns 0 on success or when
 * no reservation is applicable (already held, system mode, or no reserve
 * name for this device), -1 if the device could not be locked. */
static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    /* Reservation already held */
    if (u->reserve)
        return 0;

    /* No reservation in system mode */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}
189
190 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
191 pa_bool_t b;
192
193 pa_assert(w);
194 pa_assert(u);
195
196 b = PA_PTR_TO_UINT(busy) && !u->reserve;
197
198 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
199 return PA_HOOK_OK;
200 }
201
202 static void monitor_done(struct userdata *u) {
203 pa_assert(u);
204
205 if (u->monitor_slot) {
206 pa_hook_slot_free(u->monitor_slot);
207 u->monitor_slot = NULL;
208 }
209
210 if (u->monitor) {
211 pa_reserve_monitor_wrapper_unref(u->monitor);
212 u->monitor = NULL;
213 }
214 }
215
/* Start monitoring the reservation state of dname so monitor_cb() can
 * suspend us while another application holds the device. Returns 0 on
 * success or when monitoring is not applicable, -1 on failure. */
static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    /* Not applicable in system mode */
    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}
239
240 static void fix_min_sleep_wakeup(struct userdata *u) {
241 size_t max_use, max_use_2;
242
243 pa_assert(u);
244
245 max_use = u->hwbuf_size - u->hwbuf_unused;
246 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
247
248 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
249 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
250
251 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
252 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
253 }
254
255 static void fix_tsched_watermark(struct userdata *u) {
256 size_t max_use;
257 pa_assert(u);
258
259 max_use = u->hwbuf_size - u->hwbuf_unused;
260
261 if (u->tsched_watermark > max_use - u->min_sleep)
262 u->tsched_watermark = max_use - u->min_sleep;
263
264 if (u->tsched_watermark < u->min_wakeup)
265 u->tsched_watermark = u->min_wakeup;
266 }
267
/* Called on underrun in timer-based scheduling mode: first try to raise
 * the wakeup watermark; if that is already at its maximum, raise the
 * sink's minimal latency instead. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* When we reach this point we have exhausted all ways to compensate
     * for underruns: watermark and latency are already at their limits. */
}
301
302 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
303 pa_usec_t usec, wm;
304
305 pa_assert(sleep_usec);
306 pa_assert(process_usec);
307
308 pa_assert(u);
309
310 usec = pa_sink_get_requested_latency_within_thread(u->sink);
311
312 if (usec == (pa_usec_t) -1)
313 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
314
315 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
316
317 if (wm > usec)
318 wm = usec/2;
319
320 *sleep_usec = usec - wm;
321 *process_usec = wm;
322
323 #ifdef DEBUG_TIMING
324 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
325 (unsigned long) (usec / PA_USEC_PER_MSEC),
326 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
327 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
328 #endif
329 }
330
331 static int try_recover(struct userdata *u, const char *call, int err) {
332 pa_assert(u);
333 pa_assert(call);
334 pa_assert(err < 0);
335
336 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
337
338 pa_assert(err != -EAGAIN);
339
340 if (err == -EPIPE)
341 pa_log_debug("%s: Buffer underrun!", call);
342
343 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
344 pa_log("%s: %s", call, pa_alsa_strerror(err));
345 return -1;
346 }
347
348 u->first = TRUE;
349 u->since_start = 0;
350 return 0;
351 }
352
353 static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
354 size_t left_to_play;
355
356 /* We use <= instead of < for this check here because an underrun
357 * only happens after the last sample was processed, not already when
358 * it is removed from the buffer. This is particularly important
359 * when block transfer is used. */
360
361 if (n_bytes <= u->hwbuf_size) {
362 left_to_play = u->hwbuf_size - n_bytes;
363
364 #ifdef DEBUG_TIMING
365 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
366 #endif
367
368 } else {
369 left_to_play = 0;
370
371 #ifdef DEBUG_TIMING
372 PA_DEBUG_TRAP;
373 #endif
374
375 if (!u->first && !u->after_rewind) {
376
377 if (pa_log_ratelimit())
378 pa_log_info("Underrun!");
379
380 if (u->use_tsched)
381 adjust_after_underrun(u);
382 }
383 }
384
385 return left_to_play;
386 }
387
388 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
389 pa_bool_t work_done = TRUE;
390 pa_usec_t max_sleep_usec = 0, process_usec = 0;
391 size_t left_to_play;
392 unsigned j = 0;
393
394 pa_assert(u);
395 pa_sink_assert_ref(u->sink);
396
397 if (u->use_tsched)
398 hw_sleep_time(u, &max_sleep_usec, &process_usec);
399
400 for (;;) {
401 snd_pcm_sframes_t n;
402 size_t n_bytes;
403 int r;
404 pa_bool_t after_avail = TRUE;
405
406 /* First we determine how many samples are missing to fill the
407 * buffer up to 100% */
408
409 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
410
411 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
412 continue;
413
414 return r;
415 }
416
417 n_bytes = (size_t) n * u->frame_size;
418
419 #ifdef DEBUG_TIMING
420 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
421 #endif
422
423 left_to_play = check_left_to_play(u, n_bytes);
424
425 if (u->use_tsched)
426
427 /* We won't fill up the playback buffer before at least
428 * half the sleep time is over because otherwise we might
429 * ask for more data from the clients then they expect. We
430 * need to guarantee that clients only have to keep around
431 * a single hw buffer length. */
432
433 if (!polled &&
434 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
435 #ifdef DEBUG_TIMING
436 pa_log_debug("Not filling up, because too early.");
437 #endif
438 break;
439 }
440
441 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
442
443 if (polled)
444 PA_ONCE_BEGIN {
445 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
446 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
447 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
448 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
449 pa_strnull(dn));
450 pa_xfree(dn);
451 } PA_ONCE_END;
452
453 #ifdef DEBUG_TIMING
454 pa_log_debug("Not filling up, because not necessary.");
455 #endif
456 break;
457 }
458
459
460 if (++j > 10) {
461 #ifdef DEBUG_TIMING
462 pa_log_debug("Not filling up, because already too many iterations.");
463 #endif
464
465 break;
466 }
467
468 n_bytes -= u->hwbuf_unused;
469 polled = FALSE;
470
471 #ifdef DEBUG_TIMING
472 pa_log_debug("Filling up");
473 #endif
474
475 for (;;) {
476 pa_memchunk chunk;
477 void *p;
478 int err;
479 const snd_pcm_channel_area_t *areas;
480 snd_pcm_uframes_t offset, frames;
481 snd_pcm_sframes_t sframes;
482
483 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
484 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
485
486 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
487
488 if (!after_avail && err == -EAGAIN)
489 break;
490
491 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
492 continue;
493
494 return r;
495 }
496
497 /* Make sure that if these memblocks need to be copied they will fit into one slot */
498 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
499 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
500
501 if (!after_avail && frames == 0)
502 break;
503
504 pa_assert(frames > 0);
505 after_avail = FALSE;
506
507 /* Check these are multiples of 8 bit */
508 pa_assert((areas[0].first & 7) == 0);
509 pa_assert((areas[0].step & 7)== 0);
510
511 /* We assume a single interleaved memory buffer */
512 pa_assert((areas[0].first >> 3) == 0);
513 pa_assert((areas[0].step >> 3) == u->frame_size);
514
515 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
516
517 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
518 chunk.length = pa_memblock_get_length(chunk.memblock);
519 chunk.index = 0;
520
521 pa_sink_render_into_full(u->sink, &chunk);
522 pa_memblock_unref_fixed(chunk.memblock);
523
524 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
525
526 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
527 continue;
528
529 return r;
530 }
531
532 work_done = TRUE;
533
534 u->write_count += frames * u->frame_size;
535 u->since_start += frames * u->frame_size;
536
537 #ifdef DEBUG_TIMING
538 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
539 #endif
540
541 if ((size_t) frames * u->frame_size >= n_bytes)
542 break;
543
544 n_bytes -= (size_t) frames * u->frame_size;
545 }
546 }
547
548 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
549
550 if (*sleep_usec > process_usec)
551 *sleep_usec -= process_usec;
552 else
553 *sleep_usec = 0;
554
555 return work_done ? 1 : 0;
556 }
557
/* Refill the playback buffer via snd_pcm_writei() (non-mmap path). A
 * partially consumed render chunk is kept in u->memchunk across calls.
 * Returns 1 if any data was written, 0 if nothing was written, and a
 * negative value on unrecoverable error. On success *sleep_usec holds
 * how long the caller may sleep before the next refill is due. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* A poll() wakeup with nothing to write indicates a driver
             * bug -- complain once */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bail out of the refill loop eventually so we don't starve */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            pa_bool_t after_avail = TRUE;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one is used up */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Advance past the frames the device accepted */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
693
/* Feed a fresh (system time, playback time) sample into the smoother so
 * latency interpolation stays accurate. Called from the IO thread. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Playback position = total bytes written minus what is still queued */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
734
735 static pa_usec_t sink_get_latency(struct userdata *u) {
736 pa_usec_t r;
737 int64_t delay;
738 pa_usec_t now1, now2;
739
740 pa_assert(u);
741
742 now1 = pa_rtclock_now();
743 now2 = pa_smoother_get(u->smoother, now1);
744
745 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
746
747 r = delay >= 0 ? (pa_usec_t) delay : 0;
748
749 if (u->memchunk.memblock)
750 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
751
752 return r;
753 }
754
755 static int build_pollfd(struct userdata *u) {
756 pa_assert(u);
757 pa_assert(u->pcm_handle);
758
759 if (u->alsa_rtpoll_item)
760 pa_rtpoll_item_free(u->alsa_rtpoll_item);
761
762 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
763 return -1;
764
765 return 0;
766 }
767
/* Called from IO context */
/* Close the PCM device and tear down its poll item; pauses the smoother
 * so latency interpolation is frozen while suspended. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
789
/* Called from IO context */
/* Recompute hwbuf_unused and the scheduling margins from the currently
 * requested latency, then program ALSA's avail_min to match. Returns 0
 * on success or a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* Push the poll() wakeup to roughly when the planned sleep ends */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
845
/* Called from IO context */
/* Reopen and reconfigure the PCM device after a suspend. The resume is
 * rejected (and the device closed again) unless the original access
 * mode, sample spec and fragment layout can be restored exactly.
 * Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Throw the old timing data away: write_count and the smoother refer
     * to the device instance we just closed and are meaningless for the
     * reopened one */
    u->write_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);

    u->first = TRUE;
    u->since_start = 0;


    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
921
/* Called from IO context */
/* IO-thread message handler: answers latency queries and performs the
 * actual suspend/resume work on state transitions, then delegates to the
 * generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Latency is 0 while the device is closed (suspended) */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming out of suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
977
978 /* Called from main context */
979 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
980 pa_sink_state_t old_state;
981 struct userdata *u;
982
983 pa_sink_assert_ref(s);
984 pa_assert_se(u = s->userdata);
985
986 old_state = pa_sink_get_state(u->sink);
987
988 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
989 reserve_done(u);
990 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
991 if (reserve_init(u, u->device_name) < 0)
992 return -1;
993
994 return 0;
995 }
996
997 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
998 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
999
1000 pa_assert(u);
1001 pa_assert(u->mixer_handle);
1002
1003 if (mask == SND_CTL_EVENT_MASK_REMOVE)
1004 return 0;
1005
1006 if (mask & SND_CTL_EVENT_MASK_VALUE) {
1007 pa_sink_get_volume(u->sink, TRUE, FALSE);
1008 pa_sink_get_mute(u->sink, TRUE);
1009 }
1010
1011 return 0;
1012 }
1013
/* Read the current hardware volume into the sink. When the volume really
 * changed and the mixer path supports dB, the software volume is reset so
 * the hardware change is not applied twice. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed -- nothing to do */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume reset;

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
    }
}
1044
/* Write the sink's virtual volume to the hardware mixer. With a dB-capable
 * path the residual between requested and achieved hardware volume is put
 * into the software volume; otherwise the virtual volume is snapped to
 * what the hardware actually accepted. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
    }
}
1083
1084 static void sink_get_mute_cb(pa_sink *s) {
1085 struct userdata *u = s->userdata;
1086 pa_bool_t b;
1087
1088 pa_assert(u);
1089 pa_assert(u->mixer_path);
1090 pa_assert(u->mixer_handle);
1091
1092 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1093 return;
1094
1095 s->muted = b;
1096 }
1097
1098 static void sink_set_mute_cb(pa_sink *s) {
1099 struct userdata *u = s->userdata;
1100
1101 pa_assert(u);
1102 pa_assert(u->mixer_path);
1103 pa_assert(u->mixer_handle);
1104
1105 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1106 }
1107
/* Switch the sink's active port: select the port's mixer path and setting
 * on the hardware, recompute the base volume and step count for the new
 * path, then re-apply the current mute and volume. Always returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* dB-capable path: place 0dB at the top of the volume scale */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        /* No dB information: expose the raw mixer step range */
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply current state through the newly selected path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1144
1145 static void sink_update_requested_latency_cb(pa_sink *s) {
1146 struct userdata *u = s->userdata;
1147 size_t before;
1148 pa_assert(u);
1149
1150 if (!u->pcm_handle)
1151 return;
1152
1153 before = u->hwbuf_unused;
1154 update_sw_params(u);
1155
1156 /* Let's check whether we now use only a smaller part of the
1157 buffer then before. If so, we need to make sure that subsequent
1158 rewinds are relative to the new maximum fill level and not to the
1159 current fill level. Thus, let's do a full rewind once, to clear
1160 things up. */
1161
1162 if (u->hwbuf_unused > before) {
1163 pa_log_debug("Requesting rewind due to latency change.");
1164 pa_sink_request_rewind(s, (size_t) -1);
1165 }
1166 }
1167
1168 static int process_rewind(struct userdata *u) {
1169 snd_pcm_sframes_t unused;
1170 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1171 pa_assert(u);
1172
1173 /* Figure out how much we shall rewind and reset the counter */
1174 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1175
1176 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1177
1178 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1179 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1180 return -1;
1181 }
1182
1183 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1184
1185 if (u->hwbuf_size > unused_nbytes)
1186 limit_nbytes = u->hwbuf_size - unused_nbytes;
1187 else
1188 limit_nbytes = 0;
1189
1190 if (rewind_nbytes > limit_nbytes)
1191 rewind_nbytes = limit_nbytes;
1192
1193 if (rewind_nbytes > 0) {
1194 snd_pcm_sframes_t in_frames, out_frames;
1195
1196 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1197
1198 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1199 pa_log_debug("before: %lu", (unsigned long) in_frames);
1200 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1201 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1202 return -1;
1203 }
1204 pa_log_debug("after: %lu", (unsigned long) out_frames);
1205
1206 rewind_nbytes = (size_t) out_frames * u->frame_size;
1207
1208 if (rewind_nbytes <= 0)
1209 pa_log_info("Tried rewind, but was apparently not possible.");
1210 else {
1211 u->write_count -= rewind_nbytes;
1212 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1213 pa_sink_process_rewind(u->sink, rewind_nbytes);
1214
1215 u->after_rewind = TRUE;
1216 return 0;
1217 }
1218 } else
1219 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1220
1221 pa_sink_process_rewind(u->sink, 0);
1222 return 0;
1223 }
1224
/* Main function of the IO thread driving this sink. One loop iteration:
 * process any pending rewind, render and write data to the PCM device
 * (mmap or plain write), update the timing smoother, program the next
 * wakeup (when timer-based scheduling is used), sleep in the rtpoll
 * loop, and finally interpret the poll revents reported by ALSA,
 * recovering from error conditions such as underruns. The loop exits
 * cleanly on PA_MESSAGE_SHUTDOWN or jumps to "fail" on an IO error, in
 * which case the module asks the core to unload it. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Rewinds must be handled before writing new data */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* The very first write starts the PCM device and
                 * resumes the smoother's clock estimation */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means a shutdown request was processed */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT signals an error condition
             * (e.g. underrun); try to recover and restart playback
             * from scratch */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1357
1358 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1359 const char *n;
1360 char *t;
1361
1362 pa_assert(data);
1363 pa_assert(ma);
1364 pa_assert(device_name);
1365
1366 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1367 pa_sink_new_data_set_name(data, n);
1368 data->namereg_fail = TRUE;
1369 return;
1370 }
1371
1372 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1373 data->namereg_fail = TRUE;
1374 else {
1375 n = device_id ? device_id : device_name;
1376 data->namereg_fail = FALSE;
1377 }
1378
1379 if (mapping)
1380 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1381 else
1382 t = pa_sprintf_malloc("alsa_output.%s", n);
1383
1384 pa_sink_new_data_set_name(data, t);
1385 pa_xfree(t);
1386 }
1387
1388 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1389
1390 if (!mapping && !element)
1391 return;
1392
1393 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1394 pa_log_info("Failed to find a working mixer device.");
1395 return;
1396 }
1397
1398 if (element) {
1399
1400 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1401 goto fail;
1402
1403 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1404 goto fail;
1405
1406 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1407 pa_alsa_path_dump(u->mixer_path);
1408 } else {
1409
1410 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1411 goto fail;
1412
1413 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1414
1415 pa_log_debug("Probed mixer paths:");
1416 pa_alsa_path_set_dump(u->mixer_path_set);
1417 }
1418
1419 return;
1420
1421 fail:
1422
1423 if (u->mixer_path_set) {
1424 pa_alsa_path_set_free(u->mixer_path_set);
1425 u->mixer_path_set = NULL;
1426 } else if (u->mixer_path) {
1427 pa_alsa_path_free(u->mixer_path);
1428 u->mixer_path = NULL;
1429 }
1430
1431 if (u->mixer_handle) {
1432 snd_mixer_close(u->mixer_handle);
1433 u->mixer_handle = NULL;
1434 }
1435 }
1436
/* Activate the mixer state discovered by find_mixer() and wire it into
 * the sink: select the active path (from the active port if one is
 * set), install hardware volume/mute callbacks where the path supports
 * them, and register for mixer event notification. Returns 0 on
 * success (including "no mixer at all"), -1 if event monitoring could
 * not be set up. The ignore_dB parameter is currently unused here; the
 * probing already happened in find_mixer(). */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No port selected yet: fall back to the first path of the
         * probed set, if any */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Anchor PA_VOLUME_NORM at the path's maximum dB value */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer's file descriptors so external volume changes
     * get picked up by the main loop */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1524
/* Create a new ALSA sink for module m. Parses module arguments, opens
 * the PCM device (by mapping, device_id, or device string), sets up
 * mmap/timer-based scheduling as permitted by the hardware, finds and
 * configures the mixer, creates the pa_sink object and spawns the IO
 * thread. Returns the new sink, or NULL on failure (in which case all
 * partially created state is torn down again via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember the pre-negotiation sample spec; ALSA may change ss
     * later and the watermark was specified relative to this one */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Buffer metrics: defaults from the core, overridable per-module */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers to be useful at all */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Grab the device reservation before actually opening it */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    /* b/d report back whether mmap resp. tsched could actually be
     * honoured by the opened device */
    b = use_mmap;
    d = use_tsched;

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    /* Translate the watermark from the requested sample spec into the
     * spec the device actually negotiated */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1830
/* Tear down everything owned by u, then free u itself. The order is
 * significant: the sink is unlinked first so no new work arrives, then
 * the IO thread is shut down, and only afterwards are the resources
 * the thread was using (rtpoll, PCM handle, smoother) released. Safe
 * to call on a partially initialized u, since every member is checked
 * before being freed. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to terminate and wait for it */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop playback before closing so queued samples are dropped */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its paths, so free either the set or the
     * standalone path, never both */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1882
1883 void pa_alsa_sink_free(pa_sink *s) {
1884 struct userdata *u;
1885
1886 pa_sink_assert_ref(s);
1887 pa_assert_se(u = s->userdata);
1888
1889 userdata_free(u);
1890 }