]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
pulse: move pa_rtclock_now in pulsecommon
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/i18n.h>
36 #include <pulse/rtclock.h>
37 #include <pulse/timeval.h>
38 #include <pulse/util.h>
39 #include <pulse/xmalloc.h>
40
41 #include <pulsecore/core.h>
42 #include <pulsecore/module.h>
43 #include <pulsecore/memchunk.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/modargs.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/sample-util.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/macro.h>
51 #include <pulsecore/thread.h>
52 #include <pulsecore/core-error.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 #include <pulsecore/time-smoother.h>
56
57 #include <modules/reserve-wrap.h>
58
59 #include "alsa-util.h"
60 #include "alsa-sink.h"
61
62 /* #define DEBUG_TIMING */
63
64 #define DEFAULT_DEVICE "default"
65 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
66 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
67 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
68 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
69 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
70
/* Per-sink instance data, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* NULL while the device is suspended */

    /* Mixer (hardware volume/mute) state */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;         /* last volume read from / written to hw */

    /* All sizes below are in bytes */
    size_t
        frame_size,                     /* size of one audio frame */
        fragment_size,                  /* size of one ALSA period */
        hwbuf_size,                     /* total hardware buffer size */
        tsched_watermark,               /* refill when only this much is left (timer scheduling) */
        hwbuf_unused,                   /* part of the hw buffer we deliberately leave unused */
        min_sleep,                      /* lower bound for sleep time per iteration */
        min_wakeup,                     /* wake up at least this much before the buffer empties */
        watermark_step;                 /* watermark increment applied after an underrun */

    unsigned nfragments;                /* number of ALSA periods */
    pa_memchunk memchunk;               /* staging chunk for the non-mmap write path */

    char *device_name;                  /* name of the PCM device */
    char *control_device;               /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1; /* mmap transfer / timer-based scheduling enabled */

    /* first: nothing written since (re)start; after_rewind: a rewind just happened */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;   /* poll fds of the PCM device in our rtpoll */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Playback timing: the smoother interpolates the playback position;
     * write_count counts all bytes ever written, since_start those
     * written since the last (re)start. */
    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;

    /* Device reservation (cooperative device sharing across processes) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
122
123 static void userdata_free(struct userdata *u);
124
125 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
126 pa_assert(r);
127 pa_assert(u);
128
129 if (pa_sink_suspend(u->sink, TRUE, PA_SUSPEND_APPLICATION) < 0)
130 return PA_HOOK_CANCEL;
131
132 return PA_HOOK_OK;
133 }
134
135 static void reserve_done(struct userdata *u) {
136 pa_assert(u);
137
138 if (u->reserve_slot) {
139 pa_hook_slot_free(u->reserve_slot);
140 u->reserve_slot = NULL;
141 }
142
143 if (u->reserve) {
144 pa_reserve_wrapper_unref(u->reserve);
145 u->reserve = NULL;
146 }
147 }
148
149 static void reserve_update(struct userdata *u) {
150 const char *description;
151 pa_assert(u);
152
153 if (!u->sink || !u->reserve)
154 return;
155
156 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
157 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
158 }
159
160 static int reserve_init(struct userdata *u, const char *dname) {
161 char *rname;
162
163 pa_assert(u);
164 pa_assert(dname);
165
166 if (u->reserve)
167 return 0;
168
169 if (pa_in_system_mode())
170 return 0;
171
172 /* We are resuming, try to lock the device */
173 if (!(rname = pa_alsa_get_reserve_name(dname)))
174 return 0;
175
176 u->reserve = pa_reserve_wrapper_get(u->core, rname);
177 pa_xfree(rname);
178
179 if (!(u->reserve))
180 return -1;
181
182 reserve_update(u);
183
184 pa_assert(!u->reserve_slot);
185 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
186
187 return 0;
188 }
189
190 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
191 pa_bool_t b;
192
193 pa_assert(w);
194 pa_assert(u);
195
196 b = PA_PTR_TO_UINT(busy) && !u->reserve;
197
198 pa_sink_suspend(u->sink, b, PA_SUSPEND_APPLICATION);
199 return PA_HOOK_OK;
200 }
201
202 static void monitor_done(struct userdata *u) {
203 pa_assert(u);
204
205 if (u->monitor_slot) {
206 pa_hook_slot_free(u->monitor_slot);
207 u->monitor_slot = NULL;
208 }
209
210 if (u->monitor) {
211 pa_reserve_monitor_wrapper_unref(u->monitor);
212 u->monitor = NULL;
213 }
214 }
215
216 static int reserve_monitor_init(struct userdata *u, const char *dname) {
217 char *rname;
218
219 pa_assert(u);
220 pa_assert(dname);
221
222 if (pa_in_system_mode())
223 return 0;
224
225 /* We are resuming, try to lock the device */
226 if (!(rname = pa_alsa_get_reserve_name(dname)))
227 return 0;
228
229 u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
230 pa_xfree(rname);
231
232 if (!(u->monitor))
233 return -1;
234
235 pa_assert(!u->monitor_slot);
236 u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
237
238 return 0;
239 }
240
241 static void fix_min_sleep_wakeup(struct userdata *u) {
242 size_t max_use, max_use_2;
243
244 pa_assert(u);
245
246 max_use = u->hwbuf_size - u->hwbuf_unused;
247 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
248
249 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
250 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
251
252 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
253 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
254 }
255
256 static void fix_tsched_watermark(struct userdata *u) {
257 size_t max_use;
258 pa_assert(u);
259
260 max_use = u->hwbuf_size - u->hwbuf_unused;
261
262 if (u->tsched_watermark > max_use - u->min_sleep)
263 u->tsched_watermark = max_use - u->min_sleep;
264
265 if (u->tsched_watermark < u->min_wakeup)
266 u->tsched_watermark = u->min_wakeup;
267 }
268
269 static void adjust_after_underrun(struct userdata *u) {
270 size_t old_watermark;
271 pa_usec_t old_min_latency, new_min_latency;
272
273 pa_assert(u);
274 pa_assert(u->use_tsched);
275
276 /* First, just try to increase the watermark */
277 old_watermark = u->tsched_watermark;
278 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
279 fix_tsched_watermark(u);
280
281 if (old_watermark != u->tsched_watermark) {
282 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
283 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
284 return;
285 }
286
287 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
288 old_min_latency = u->sink->thread_info.min_latency;
289 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
290 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
291
292 if (old_min_latency != new_min_latency) {
293 pa_log_notice("Increasing minimal latency to %0.2f ms",
294 (double) new_min_latency / PA_USEC_PER_MSEC);
295
296 pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
297 return;
298 }
299
300 /* When we reach this we're officialy fucked! */
301 }
302
303 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
304 pa_usec_t usec, wm;
305
306 pa_assert(sleep_usec);
307 pa_assert(process_usec);
308
309 pa_assert(u);
310
311 usec = pa_sink_get_requested_latency_within_thread(u->sink);
312
313 if (usec == (pa_usec_t) -1)
314 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
315
316 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
317
318 if (wm > usec)
319 wm = usec/2;
320
321 *sleep_usec = usec - wm;
322 *process_usec = wm;
323
324 #ifdef DEBUG_TIMING
325 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
326 (unsigned long) (usec / PA_USEC_PER_MSEC),
327 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
328 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
329 #endif
330 }
331
332 static int try_recover(struct userdata *u, const char *call, int err) {
333 pa_assert(u);
334 pa_assert(call);
335 pa_assert(err < 0);
336
337 pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
338
339 pa_assert(err != -EAGAIN);
340
341 if (err == -EPIPE)
342 pa_log_debug("%s: Buffer underrun!", call);
343
344 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
345 pa_log("%s: %s", call, pa_alsa_strerror(err));
346 return -1;
347 }
348
349 u->first = TRUE;
350 u->since_start = 0;
351 return 0;
352 }
353
/* Given 'n_bytes' of free space in the hw buffer, return how many bytes
 * are still queued for playback. If the free space exceeds the buffer
 * size we ran into an underrun: log it and, with timer scheduling,
 * adjust the watermark/latency via adjust_after_underrun(). */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* Underrun: the device consumed more than we ever wrote */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Right after a start or rewind an apparent underrun is expected
         * and not worth reporting or reacting to */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
388
389 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
390 pa_bool_t work_done = TRUE;
391 pa_usec_t max_sleep_usec = 0, process_usec = 0;
392 size_t left_to_play;
393 unsigned j = 0;
394
395 pa_assert(u);
396 pa_sink_assert_ref(u->sink);
397
398 if (u->use_tsched)
399 hw_sleep_time(u, &max_sleep_usec, &process_usec);
400
401 for (;;) {
402 snd_pcm_sframes_t n;
403 size_t n_bytes;
404 int r;
405
406 /* First we determine how many samples are missing to fill the
407 * buffer up to 100% */
408
409 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
410
411 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
412 continue;
413
414 return r;
415 }
416
417 n_bytes = (size_t) n * u->frame_size;
418
419 #ifdef DEBUG_TIMING
420 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
421 #endif
422
423 left_to_play = check_left_to_play(u, n_bytes);
424
425 if (u->use_tsched)
426
427 /* We won't fill up the playback buffer before at least
428 * half the sleep time is over because otherwise we might
429 * ask for more data from the clients then they expect. We
430 * need to guarantee that clients only have to keep around
431 * a single hw buffer length. */
432
433 if (!polled &&
434 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
435 #ifdef DEBUG_TIMING
436 pa_log_debug("Not filling up, because too early.");
437 #endif
438 break;
439 }
440
441 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
442
443 if (polled)
444 PA_ONCE_BEGIN {
445 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
446 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
447 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
448 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
449 pa_strnull(dn));
450 pa_xfree(dn);
451 } PA_ONCE_END;
452
453 #ifdef DEBUG_TIMING
454 pa_log_debug("Not filling up, because not necessary.");
455 #endif
456 break;
457 }
458
459
460 if (++j > 10) {
461 #ifdef DEBUG_TIMING
462 pa_log_debug("Not filling up, because already too many iterations.");
463 #endif
464
465 break;
466 }
467
468 n_bytes -= u->hwbuf_unused;
469 polled = FALSE;
470
471 #ifdef DEBUG_TIMING
472 pa_log_debug("Filling up");
473 #endif
474
475 for (;;) {
476 pa_memchunk chunk;
477 void *p;
478 int err;
479 const snd_pcm_channel_area_t *areas;
480 snd_pcm_uframes_t offset, frames;
481 snd_pcm_sframes_t sframes;
482
483 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
484 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
485
486 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
487
488 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
489 continue;
490
491 return r;
492 }
493
494 /* Make sure that if these memblocks need to be copied they will fit into one slot */
495 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
496 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
497
498 /* Check these are multiples of 8 bit */
499 pa_assert((areas[0].first & 7) == 0);
500 pa_assert((areas[0].step & 7)== 0);
501
502 /* We assume a single interleaved memory buffer */
503 pa_assert((areas[0].first >> 3) == 0);
504 pa_assert((areas[0].step >> 3) == u->frame_size);
505
506 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
507
508 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
509 chunk.length = pa_memblock_get_length(chunk.memblock);
510 chunk.index = 0;
511
512 pa_sink_render_into_full(u->sink, &chunk);
513 pa_memblock_unref_fixed(chunk.memblock);
514
515 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
516
517 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
518 continue;
519
520 return r;
521 }
522
523 work_done = TRUE;
524
525 u->write_count += frames * u->frame_size;
526 u->since_start += frames * u->frame_size;
527
528 #ifdef DEBUG_TIMING
529 pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
530 #endif
531
532 if ((size_t) frames * u->frame_size >= n_bytes)
533 break;
534
535 n_bytes -= (size_t) frames * u->frame_size;
536 }
537 }
538
539 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
540
541 if (*sleep_usec > process_usec)
542 *sleep_usec -= process_usec;
543 else
544 *sleep_usec = 0;
545
546 return work_done ? 1 : 0;
547 }
548
/* Fill the playback buffer using plain snd_pcm_writei() -- the fallback
 * path when mmap access is unavailable. Data is rendered into the
 * staging memchunk u->memchunk and written from there.
 *
 * Returns 1 if any data was written, 0 if not, negative on error. On
 * success *sleep_usec is set to how long the caller may sleep before
 * the next refill is due. 'polled' tells us whether poll() woke us up. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is there in the hw buffer right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* Spurious wakeup: POLLOUT was set but there is nothing
             * (useful) to write -- most likely an ALSA driver bug */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety cap so we never loop forever on a misbehaving device */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* Render more data if our staging memchunk ran empty */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            /* Write no more than fits into the hw buffer right now */
            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Consume what was written from the staging memchunk */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Tell the caller how long it may sleep before refilling is due */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
676
/* Feed a fresh (time, playback position) sample into the time smoother,
 * which interpolates the playback position for latency queries. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver-provided timestamp over our own clock */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Bytes actually played = bytes written - bytes still queued in hw */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
717
718 static pa_usec_t sink_get_latency(struct userdata *u) {
719 pa_usec_t r;
720 int64_t delay;
721 pa_usec_t now1, now2;
722
723 pa_assert(u);
724
725 now1 = pa_rtclock_now();
726 now2 = pa_smoother_get(u->smoother, now1);
727
728 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
729
730 r = delay >= 0 ? (pa_usec_t) delay : 0;
731
732 if (u->memchunk.memblock)
733 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
734
735 return r;
736 }
737
738 static int build_pollfd(struct userdata *u) {
739 pa_assert(u);
740 pa_assert(u->pcm_handle);
741
742 if (u->alsa_rtpoll_item)
743 pa_rtpoll_item_free(u->alsa_rtpoll_item);
744
745 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
746 return -1;
747
748 return 0;
749 }
750
751 /* Called from IO context */
/* Close the ALSA device and tear down its poll registration; the
 * smoother is paused so latency interpolation stops advancing.
 * Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze the smoother at the current time */
    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
772
773 /* Called from IO context */
/* Recompute hwbuf_unused and the ALSA avail_min software parameter from
 * the currently requested latency, then push them to the device.
 * Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling, delay hw wakeups until the planned sleep is over */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
828
829 /* Called from IO context */
/* Reopen and reconfigure the PCM device after a suspend. The new
 * configuration must match the pre-suspend one exactly (access mode,
 * sample spec, fragment layout), otherwise we fail and close the
 * device again. Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Disable ALSA's transparent conversions so we get exactly the
     * configuration we ask for */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Ask for the same configuration we had before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Start from a clean state, as after a fresh open */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    /* Clean up the half-opened device on any failure path */
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
900
901 /* Called from IO context */
/* IO-thread message handler: answers latency queries and opens/closes
 * the ALSA device on sink state changes; everything else is delegated
 * to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Latency is only meaningful while the device is open */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* Coming from INIT: set up polling for the first time */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming from SUSPENDED: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
956
957 /* Called from main context */
958 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
959 pa_sink_state_t old_state;
960 struct userdata *u;
961
962 pa_sink_assert_ref(s);
963 pa_assert_se(u = s->userdata);
964
965 old_state = pa_sink_get_state(u->sink);
966
967 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
968 reserve_done(u);
969 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
970 if (reserve_init(u, u->device_name) < 0)
971 return -1;
972
973 return 0;
974 }
975
976 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
977 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
978
979 pa_assert(u);
980 pa_assert(u->mixer_handle);
981
982 if (mask == SND_CTL_EVENT_MASK_REMOVE)
983 return 0;
984
985 if (mask & SND_CTL_EVENT_MASK_VALUE) {
986 pa_sink_get_volume(u->sink, TRUE, FALSE);
987 pa_sink_get_mute(u->sink, TRUE);
988 }
989
990 return 0;
991 }
992
/* Read the hardware volume from the mixer and propagate it to the sink.
 * Called when the hw volume may have changed behind our back. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    /* Nothing changed -- avoid needless updates */
    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->virtual_volume = u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume reset;

        /* Hmm, so the hardware volume changed, let's reset our software volume */
        pa_cvolume_reset(&reset, s->sample_spec.channels);
        pa_sink_set_soft_volume(s, &reset);
    }
}
1023
/* Write the sink's virtual volume to the hardware mixer and, when dB
 * information is available, compute the software volume needed to make
 * up the difference between what was requested and what the hw granted. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->virtual_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;
    }
}
1062
1063 static void sink_get_mute_cb(pa_sink *s) {
1064 struct userdata *u = s->userdata;
1065 pa_bool_t b;
1066
1067 pa_assert(u);
1068 pa_assert(u->mixer_path);
1069 pa_assert(u->mixer_handle);
1070
1071 if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1072 return;
1073
1074 s->muted = b;
1075 }
1076
1077 static void sink_set_mute_cb(pa_sink *s) {
1078 struct userdata *u = s->userdata;
1079
1080 pa_assert(u);
1081 pa_assert(u->mixer_path);
1082 pa_assert(u->mixer_handle);
1083
1084 pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1085 }
1086
/* Switch the sink to a different port: select the corresponding mixer
 * path, recompute base volume and volume-step info, and re-apply the
 * current mute and volume settings on the new path. Returns 0. */
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);

    data = PA_DEVICE_PORT_DATA(p);

    pa_assert_se(u->mixer_path = data->path);
    pa_alsa_path_select(u->mixer_path, u->mixer_handle);

    if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
        /* With dB information we can map 0dB onto PA_VOLUME_NORM */
        s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
        s->n_volume_steps = PA_VOLUME_NORM+1;

        if (u->mixer_path->max_dB > 0.0)
            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
        else
            pa_log_info("No particular base volume set, fixing to 0 dB");
    } else {
        /* Without dB information only raw mixer steps are available */
        s->base_volume = PA_VOLUME_NORM;
        s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
    }

    if (data->setting)
        pa_alsa_setting_select(data->setting, u->mixer_handle);

    /* Re-apply the current mute/volume state on the new path */
    if (s->set_mute)
        s->set_mute(s);
    if (s->set_volume)
        s->set_volume(s);

    return 0;
}
1123
1124 static void sink_update_requested_latency_cb(pa_sink *s) {
1125 struct userdata *u = s->userdata;
1126 size_t before;
1127 pa_assert(u);
1128
1129 if (!u->pcm_handle)
1130 return;
1131
1132 before = u->hwbuf_unused;
1133 update_sw_params(u);
1134
1135 /* Let's check whether we now use only a smaller part of the
1136 buffer then before. If so, we need to make sure that subsequent
1137 rewinds are relative to the new maximum fill level and not to the
1138 current fill level. Thus, let's do a full rewind once, to clear
1139 things up. */
1140
1141 if (u->hwbuf_unused > before) {
1142 pa_log_debug("Requesting rewind due to latency change.");
1143 pa_sink_request_rewind(s, (size_t) -1);
1144 }
1145 }
1146
1147 static int process_rewind(struct userdata *u) {
1148 snd_pcm_sframes_t unused;
1149 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1150 pa_assert(u);
1151
1152 /* Figure out how much we shall rewind and reset the counter */
1153 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1154
1155 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1156
1157 if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1158 pa_log("snd_pcm_avail() failed: %s", pa_alsa_strerror((int) unused));
1159 return -1;
1160 }
1161
1162 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1163
1164 if (u->hwbuf_size > unused_nbytes)
1165 limit_nbytes = u->hwbuf_size - unused_nbytes;
1166 else
1167 limit_nbytes = 0;
1168
1169 if (rewind_nbytes > limit_nbytes)
1170 rewind_nbytes = limit_nbytes;
1171
1172 if (rewind_nbytes > 0) {
1173 snd_pcm_sframes_t in_frames, out_frames;
1174
1175 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1176
1177 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1178 pa_log_debug("before: %lu", (unsigned long) in_frames);
1179 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1180 pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1181 return -1;
1182 }
1183 pa_log_debug("after: %lu", (unsigned long) out_frames);
1184
1185 rewind_nbytes = (size_t) out_frames * u->frame_size;
1186
1187 if (rewind_nbytes <= 0)
1188 pa_log_info("Tried rewind, but was apparently not possible.");
1189 else {
1190 u->write_count -= out_frames * u->frame_size;
1191 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1192 pa_sink_process_rewind(u->sink, rewind_nbytes);
1193
1194 u->after_rewind = TRUE;
1195 return 0;
1196 }
1197 } else
1198 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1199
1200 pa_sink_process_rewind(u->sink, 0);
1201 return 0;
1202 }
1203
/* The sink's realtime I/O thread: renders audio into the ALSA buffer,
 * schedules wakeups (timer-based or fd-poll-based), and reacts to
 * device events until a shutdown message arrives. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate to realtime priority if the daemon is configured for it. */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Honour any pending rewind request before writing new data. */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First data written since (re)start: kick off the PCM
                 * stream and resume the timing smoother. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* A zero return indicates a regular shutdown request. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything other than POLLOUT signals an error condition:
             * try to recover and restart playback from scratch. */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1337
1338 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1339 const char *n;
1340 char *t;
1341
1342 pa_assert(data);
1343 pa_assert(ma);
1344 pa_assert(device_name);
1345
1346 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1347 pa_sink_new_data_set_name(data, n);
1348 data->namereg_fail = TRUE;
1349 return;
1350 }
1351
1352 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1353 data->namereg_fail = TRUE;
1354 else {
1355 n = device_id ? device_id : device_name;
1356 data->namereg_fail = FALSE;
1357 }
1358
1359 if (mapping)
1360 t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
1361 else
1362 t = pa_sprintf_malloc("alsa_output.%s", n);
1363
1364 pa_sink_new_data_set_name(data, t);
1365 pa_xfree(t);
1366 }
1367
1368 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1369
1370 if (!mapping && !element)
1371 return;
1372
1373 if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1374 pa_log_info("Failed to find a working mixer device.");
1375 return;
1376 }
1377
1378 if (element) {
1379
1380 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
1381 goto fail;
1382
1383 if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1384 goto fail;
1385
1386 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1387 pa_alsa_path_dump(u->mixer_path);
1388 } else {
1389
1390 if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_OUTPUT)))
1391 goto fail;
1392
1393 pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1394
1395 pa_log_debug("Probed mixer paths:");
1396 pa_alsa_path_set_dump(u->mixer_path_set);
1397 }
1398
1399 return;
1400
1401 fail:
1402
1403 if (u->mixer_path_set) {
1404 pa_alsa_path_set_free(u->mixer_path_set);
1405 u->mixer_path_set = NULL;
1406 } else if (u->mixer_path) {
1407 pa_alsa_path_free(u->mixer_path);
1408 u->mixer_path = NULL;
1409 }
1410
1411 if (u->mixer_handle) {
1412 snd_mixer_close(u->mixer_handle);
1413 u->mixer_handle = NULL;
1414 }
1415 }
1416
/* Activate the probed mixer path, hook hardware volume/mute callbacks
 * onto the sink where the hardware supports them, and start monitoring
 * the mixer's fds on the main loop. Returns 0 on success (including the
 * no-mixer case), -1 on error. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->sink->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        /* No active port: fall back to the first path of the set. */
        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    if (!u->mixer_path->has_volume)
        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    else {

        if (u->mixer_path->has_dB) {
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Derive the sink's base volume from the path's maximum dB
             * value; volume granularity is the full PA_VOLUME range. */
            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            if (u->mixer_path->max_dB > 0.0)
                pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
            else
                pa_log_info("No particular base volume set, fixing to 0 dB");

        } else {
            /* No dB data: expose the raw integer control range. */
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        u->sink->get_volume = sink_get_volume_cb;
        u->sink->set_volume = sink_set_volume_cb;

        u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->mixer_path->has_dB ? PA_SINK_DECIBEL_VOLUME : 0);
        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        pa_log_info("Using hardware mute control.");
    }

    /* Watch the mixer fds on the main loop so external mixer changes
     * are delivered to mixer_callback. */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    if (u->mixer_path_set)
        pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
    else
        pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);

    return 0;
}
1504
/* Create a new ALSA sink: parse module arguments, reserve and open the
 * PCM device (by mapping, device id, or device string), probe the mixer,
 * create the pa_sink object, and start the I/O thread. Returns the new
 * sink, or NULL on failure (everything allocated so far is freed). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    /* Start from the daemon defaults, then let module args override. */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember the requested spec: ALSA may tweak ss when opening. */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs high-resolution kernel timers. */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    /* Take the device reservation before actually opening the device. */
    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* Open the PCM device. b/d come back reflecting whether mmap and
     * tsched are actually usable with this device. */
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, mapping)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    /* Fall back gracefully if the device couldn't give us what we asked
     * for. */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    /* Fill in the sink description and create the sink proper. */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->set_port = sink_set_port_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Cache the final buffer metrics in the userdata. */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    /* NOTE(review): data.volume_is_set/muted_is_set are read after
     * pa_sink_new_data_done(&data) above; presumably done() frees only
     * the allocated members and leaves these flags valid — confirm. */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->sink;

fail:

    /* userdata_free() copes with a partially initialized userdata. */
    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}
1810
/* Tear down everything referenced by *u, in order: detach the sink,
 * stop the I/O thread, then release buffers, poll items, the PCM and
 * mixer handles, reservations, and finally the struct itself. All
 * checks are NULL-guarded, so this is safe to call on a partially
 * initialized userdata (as pa_alsa_sink_new()'s fail path does). */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core first so no new work arrives. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to shut down and wait for it to exit. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop playback and close the PCM device. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Free either the path set or the stand-alone path, never both —
     * presumably the set owns the paths it contains; matches the
     * either/or setup in find_mixer(). */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation objects. */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
1862
1863 void pa_alsa_sink_free(pa_sink *s) {
1864 struct userdata *u;
1865
1866 pa_sink_assert_ref(s);
1867 pa_assert_se(u = s->userdata);
1868
1869 userdata_free(u);
1870 }