]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
Merge commit 'origin/master-tx'
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
63 #define DEFAULT_DEVICE "default"
64 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s -- Overall buffer size */
65 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- Fill up when only this much is left in the buffer */
66 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- On underrun, increase watermark by this */
67 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC) /* 10ms -- Sleep at least 10ms on each iteration */
68 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC) /* 4ms -- Wakeup at least this long before the buffer runs empty*/
69
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;                /* NULL while the sink is suspended */

    /* Hardware mixer state */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;    /* raw volume range of the mixer element */
    long hw_dB_max, hw_dB_min;            /* dB*100 range; only valid if hw_dB_supported */
    pa_bool_t hw_dB_supported:1;          /* element supports dB-based volume control */
    pa_bool_t mixer_seperate_channels:1;  /* element has per-channel volume control */
    pa_cvolume hardware_volume;           /* last volume read from/written to the hardware */

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_step;

    unsigned nfragments;
    pa_memchunk memchunk;                 /* partially-written chunk (unix_write path) */

    char *device_name;

    pa_bool_t use_mmap:1, use_tsched:1;   /* access mode and timer-based scheduling */

    /* first: nothing played yet since start/resume/recovery; after_rewind: a
     * rewind just happened -- both suppress the underrun heuristics */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation */
    pa_smoother *smoother;
    uint64_t write_count;                 /* total bytes written to the device */
    uint64_t since_start;                 /* bytes written since start/resume/recovery */

    /* Device reservation (reserve-device protocol) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
};
120
121 static void userdata_free(struct userdata *u);
122
123 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
124 pa_assert(r);
125 pa_assert(u);
126
127 if (pa_sink_suspend(u->sink, TRUE) < 0)
128 return PA_HOOK_CANCEL;
129
130 return PA_HOOK_OK;
131 }
132
133 static void reserve_done(struct userdata *u) {
134 pa_assert(u);
135
136 if (u->reserve_slot) {
137 pa_hook_slot_free(u->reserve_slot);
138 u->reserve_slot = NULL;
139 }
140
141 if (u->reserve) {
142 pa_reserve_wrapper_unref(u->reserve);
143 u->reserve = NULL;
144 }
145 }
146
147 static void reserve_update(struct userdata *u) {
148 const char *description;
149 pa_assert(u);
150
151 if (!u->sink || !u->reserve)
152 return;
153
154 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
155 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
156 }
157
158 static int reserve_init(struct userdata *u, const char *dname) {
159 char *rname;
160
161 pa_assert(u);
162 pa_assert(dname);
163
164 if (u->reserve)
165 return 0;
166
167 if (pa_in_system_mode())
168 return 0;
169
170 /* We are resuming, try to lock the device */
171 if (!(rname = pa_alsa_get_reserve_name(dname)))
172 return 0;
173
174 u->reserve = pa_reserve_wrapper_get(u->core, rname);
175 pa_xfree(rname);
176
177 if (!(u->reserve))
178 return -1;
179
180 reserve_update(u);
181
182 pa_assert(!u->reserve_slot);
183 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
184
185 return 0;
186 }
187
188 static void fix_min_sleep_wakeup(struct userdata *u) {
189 size_t max_use, max_use_2;
190
191 pa_assert(u);
192
193 max_use = u->hwbuf_size - u->hwbuf_unused;
194 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
195
196 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
197 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
198
199 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
200 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
201 }
202
203 static void fix_tsched_watermark(struct userdata *u) {
204 size_t max_use;
205 pa_assert(u);
206
207 max_use = u->hwbuf_size - u->hwbuf_unused;
208
209 if (u->tsched_watermark > max_use - u->min_sleep)
210 u->tsched_watermark = max_use - u->min_sleep;
211
212 if (u->tsched_watermark < u->min_wakeup)
213 u->tsched_watermark = u->min_wakeup;
214 }
215
/* Called from IO context after an underrun (tsched mode only): first try to
 * enlarge the wakeup watermark; if it is already at its maximum, raise the
 * sink's minimal latency instead. If neither is possible any more, there is
 * nothing left we can do. */
static void adjust_after_underrun(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    /* Double it, but grow by at most one watermark_step per underrun */
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
    fix_tsched_watermark(u);

    /* fix_tsched_watermark() may have clamped us right back; only report
     * success if the value actually changed */
    if (old_watermark != u->tsched_watermark) {
        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_notice("Increasing minimal latency to %0.2f ms",
                      (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
        return;
    }

    /* When we reach this point the underruns cannot be mitigated any further */
}
249
250 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
251 pa_usec_t usec, wm;
252
253 pa_assert(sleep_usec);
254 pa_assert(process_usec);
255
256 pa_assert(u);
257
258 usec = pa_sink_get_requested_latency_within_thread(u->sink);
259
260 if (usec == (pa_usec_t) -1)
261 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
262
263 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
264
265 if (wm > usec)
266 wm = usec/2;
267
268 *sleep_usec = usec - wm;
269 *process_usec = wm;
270
271 #ifdef DEBUG_TIMING
272 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
273 (unsigned long) (usec / PA_USEC_PER_MSEC),
274 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
275 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
276 #endif
277 }
278
279 static int try_recover(struct userdata *u, const char *call, int err) {
280 pa_assert(u);
281 pa_assert(call);
282 pa_assert(err < 0);
283
284 pa_log_debug("%s: %s", call, snd_strerror(err));
285
286 pa_assert(err != -EAGAIN);
287
288 if (err == -EPIPE)
289 pa_log_debug("%s: Buffer underrun!", call);
290
291 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
292 pa_log("%s: %s", call, snd_strerror(err));
293 return -1;
294 }
295
296 u->first = TRUE;
297 u->since_start = 0;
298 return 0;
299 }
300
/* Given 'n_bytes' of free space reported by the device, compute how many
 * bytes are still queued for playback. Detects underruns (free space
 * exceeding the buffer size) and triggers the tsched adjustment logic,
 * unless we are right after a start/resume or a rewind. */
static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
    size_t left_to_play;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size) {
        left_to_play = u->hwbuf_size - n_bytes;

#ifdef DEBUG_TIMING
        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    } else {
        /* More free space than buffer: we ran dry */
        left_to_play = 0;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        /* Suppress the underrun handling right after start/resume or a
         * rewind, where an apparent underrun is expected and harmless */
        if (!u->first && !u->after_rewind) {

            if (pa_log_ratelimit())
                pa_log_info("Underrun!");

            if (u->use_tsched)
                adjust_after_underrun(u);
        }
    }

    return left_to_play;
}
335
336 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
337 pa_bool_t work_done = TRUE;
338 pa_usec_t max_sleep_usec = 0, process_usec = 0;
339 size_t left_to_play;
340 unsigned j = 0;
341
342 pa_assert(u);
343 pa_sink_assert_ref(u->sink);
344
345 if (u->use_tsched)
346 hw_sleep_time(u, &max_sleep_usec, &process_usec);
347
348 for (;;) {
349 snd_pcm_sframes_t n;
350 size_t n_bytes;
351 int r;
352
353 /* First we determine how many samples are missing to fill the
354 * buffer up to 100% */
355
356 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
357
358 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
359 continue;
360
361 return r;
362 }
363
364 n_bytes = (size_t) n * u->frame_size;
365
366 #ifdef DEBUG_TIMING
367 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
368 #endif
369
370 left_to_play = check_left_to_play(u, n_bytes);
371
372 if (u->use_tsched)
373
374 /* We won't fill up the playback buffer before at least
375 * half the sleep time is over because otherwise we might
376 * ask for more data from the clients then they expect. We
377 * need to guarantee that clients only have to keep around
378 * a single hw buffer length. */
379
380 if (!polled &&
381 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
382 #ifdef DEBUG_TIMING
383 pa_log_debug("Not filling up, because too early.");
384 #endif
385 break;
386 }
387
388 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
389
390 if (polled)
391 PA_ONCE_BEGIN {
392 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
393 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
394 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
395 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
396 pa_strnull(dn));
397 pa_xfree(dn);
398 } PA_ONCE_END;
399
400 #ifdef DEBUG_TIMING
401 pa_log_debug("Not filling up, because not necessary.");
402 #endif
403 break;
404 }
405
406
407 if (++j > 10) {
408 #ifdef DEBUG_TIMING
409 pa_log_debug("Not filling up, because already too many iterations.");
410 #endif
411
412 break;
413 }
414
415 n_bytes -= u->hwbuf_unused;
416 polled = FALSE;
417
418 #ifdef DEBUG_TIMING
419 pa_log_debug("Filling up");
420 #endif
421
422 for (;;) {
423 pa_memchunk chunk;
424 void *p;
425 int err;
426 const snd_pcm_channel_area_t *areas;
427 snd_pcm_uframes_t offset, frames;
428 snd_pcm_sframes_t sframes;
429
430 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
431 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
432
433 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
434
435 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
436 continue;
437
438 return r;
439 }
440
441 /* Make sure that if these memblocks need to be copied they will fit into one slot */
442 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
443 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
444
445 /* Check these are multiples of 8 bit */
446 pa_assert((areas[0].first & 7) == 0);
447 pa_assert((areas[0].step & 7)== 0);
448
449 /* We assume a single interleaved memory buffer */
450 pa_assert((areas[0].first >> 3) == 0);
451 pa_assert((areas[0].step >> 3) == u->frame_size);
452
453 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
454
455 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
456 chunk.length = pa_memblock_get_length(chunk.memblock);
457 chunk.index = 0;
458
459 pa_sink_render_into_full(u->sink, &chunk);
460 pa_memblock_unref_fixed(chunk.memblock);
461
462 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
463
464 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
465 continue;
466
467 return r;
468 }
469
470 work_done = TRUE;
471
472 u->write_count += frames * u->frame_size;
473 u->since_start += frames * u->frame_size;
474
475 #ifdef DEBUG_TIMING
476 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
477 #endif
478
479 if ((size_t) frames * u->frame_size >= n_bytes)
480 break;
481
482 n_bytes -= (size_t) frames * u->frame_size;
483 }
484 }
485
486 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
487
488 if (*sleep_usec > process_usec)
489 *sleep_usec -= process_usec;
490 else
491 *sleep_usec = 0;
492
493 return work_done ? 1 : 0;
494 }
495
/* Fill the hardware buffer using plain snd_pcm_writei() -- the non-mmap
 * fallback path. Called from IO context. Mirrors mmap_write(): returns 1 if
 * any data was written, 0 if not, negative on unrecoverable error; sets
 * *sleep_usec to how long the caller may sleep. 'polled' indicates a
 * poll()-triggered wakeup and enables the driver sanity check. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room is in the hardware buffer? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* poll() woke us but there is nothing to write -- driver bug;
             * report it once only */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Don't spin forever if the device keeps reporting room */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one was fully
             * consumed; a partial write leaves the remainder in u->memchunk */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance past the data the device accepted */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Sleep until the watermark is reached, minus the processing margin */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);

    if (*sleep_usec > process_usec)
        *sleep_usec -= process_usec;
    else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}
623
/* Feed the latency smoother with a fresh (wallclock, playback position)
 * sample pair, derived from the device's current delay and the driver's
 * hardware timestamp. Called from IO context. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Playback position = bytes handed to the device minus what is still
     * queued in the hardware buffer */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    /* Hmm, the device reports more queued than we ever wrote? Clamp. */
    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
664
665 static pa_usec_t sink_get_latency(struct userdata *u) {
666 pa_usec_t r;
667 int64_t delay;
668 pa_usec_t now1, now2;
669
670 pa_assert(u);
671
672 now1 = pa_rtclock_usec();
673 now2 = pa_smoother_get(u->smoother, now1);
674
675 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
676
677 r = delay >= 0 ? (pa_usec_t) delay : 0;
678
679 if (u->memchunk.memblock)
680 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
681
682 return r;
683 }
684
685 static int build_pollfd(struct userdata *u) {
686 pa_assert(u);
687 pa_assert(u->pcm_handle);
688
689 if (u->alsa_rtpoll_item)
690 pa_rtpoll_item_free(u->alsa_rtpoll_item);
691
692 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
693 return -1;
694
695 return 0;
696 }
697
698 /* Called from IO context */
/* Called from IO context: suspend playback by closing the PCM device and
 * tearing down its poll item. The smoother is paused so latency queries
 * stay consistent across the suspend. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
719
720 /* Called from IO context */
/* Called from IO context: recompute hwbuf_unused from the currently
 * requested latency, fix up the tsched margins, and push a matching
 * avail_min to ALSA's software parameters. Returns 0 on success or a
 * negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* Margins depend on hwbuf_unused, so recompute them now */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, ask ALSA to wake us only once the sleep period
         * has elapsed */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
775
776 /* Called from IO context */
/* Called from IO context: reopen the PCM device after a suspend and restore
 * exactly the configuration we had before. Fails (returns -1, device left
 * closed) if the hardware can no longer provide the original access mode,
 * sample spec or fragment layout. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop ALSA's cached global config so device changes are noticed */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request the same parameters as before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may have negotiated different values; verify
     * everything came back unchanged */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* Reset the playback bookkeeping, as on a fresh start */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
848
849 /* Called from IO context */
/* Called from IO context: message handler for the sink. Handles latency
 * queries and state transitions locally; everything else is delegated to
 * the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Latency is only meaningful while the device is open */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First activation: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Resuming from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
904
905 /* Called from main context */
906 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
907 pa_sink_state_t old_state;
908 struct userdata *u;
909
910 pa_sink_assert_ref(s);
911 pa_assert_se(u = s->userdata);
912
913 old_state = pa_sink_get_state(u->sink);
914
915 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
916 reserve_done(u);
917 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
918 if (reserve_init(u, u->device_name) < 0)
919 return -1;
920
921 return 0;
922 }
923
924 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
925 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
926
927 pa_assert(u);
928 pa_assert(u->mixer_handle);
929
930 if (mask == SND_CTL_EVENT_MASK_REMOVE)
931 return 0;
932
933 if (mask & SND_CTL_EVENT_MASK_VALUE) {
934 pa_sink_get_volume(u->sink, TRUE, FALSE);
935 pa_sink_get_mute(u->sink, TRUE);
936 }
937
938 return 0;
939 }
940
941 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
942
943 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
944 (double) (u->hw_volume_max - u->hw_volume_min));
945 }
946
947 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
948 long alsa_vol;
949
950 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
951 / PA_VOLUME_NORM) + u->hw_volume_min;
952
953 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
954 }
955
/* Called from main context: read the current hardware volume into
 * s->virtual_volume. Uses the dB API when supported, the raw volume range
 * otherwise; reads per-channel or mono depending on the mixer element. If
 * the hardware value changed behind our back, the software volume is reset
 * so the two don't stack. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        /* One ALSA channel per PA channel, via the mixer_map table */
        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA reports dB*100, relative to hw_dB_max */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Mono element: read one value, apply it to all channels */
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
1035
/* Called from main context: write s->virtual_volume to the hardware. Each
 * write is followed by a read-back, since the hardware quantizes to its own
 * steps. With dB support the residual between the requested and the
 * achieved volume is compensated in software; without it we simply adopt
 * whatever the hardware gave us. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        /* Set each ALSA channel individually, via the mixer_map table */
        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to dB*100, clamped to the element's dB range */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back what the hardware actually chose */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                /* Read back what the hardware actually chose */
                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Mono element: drive it with the loudest requested channel */
        pa_volume_t vol;
        long alsa_vol;

        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
1146
1147 static void sink_get_mute_cb(pa_sink *s) {
1148 struct userdata *u = s->userdata;
1149 int err, sw;
1150
1151 pa_assert(u);
1152 pa_assert(u->mixer_elem);
1153
1154 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1155 pa_log_error("Unable to get switch: %s", snd_strerror(err));
1156 return;
1157 }
1158
1159 s->muted = !sw;
1160 }
1161
1162 static void sink_set_mute_cb(pa_sink *s) {
1163 struct userdata *u = s->userdata;
1164 int err;
1165
1166 pa_assert(u);
1167 pa_assert(u->mixer_elem);
1168
1169 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1170 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1171 return;
1172 }
1173 }
1174
1175 static void sink_update_requested_latency_cb(pa_sink *s) {
1176 struct userdata *u = s->userdata;
1177 size_t before;
1178 pa_assert(u);
1179
1180 if (!u->pcm_handle)
1181 return;
1182
1183 before = u->hwbuf_unused;
1184 update_sw_params(u);
1185
1186 /* Let's check whether we now use only a smaller part of the
1187 buffer then before. If so, we need to make sure that subsequent
1188 rewinds are relative to the new maximum fill level and not to the
1189 current fill level. Thus, let's do a full rewind once, to clear
1190 things up. */
1191
1192 if (u->hwbuf_unused > before) {
1193 pa_log_debug("Requesting rewind due to latency change.");
1194 pa_sink_request_rewind(s, (size_t) -1);
1195 }
1196 }
1197
/* Rewind the ALSA playback buffer by the amount the sink requested,
 * clamped to what is actually rewindable, then report the number of
 * bytes really taken back to the sink core.
 *
 * Returns 0 on success (including "nothing to rewind"), -1 on an
 * unrecoverable ALSA error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* How much of the hardware buffer is currently unused (i.e.
     * writable)? Everything beyond that, minus the watermark, is
     * rewindable. */
    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log("snd_pcm_avail() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    /* Keep at least a watermark's worth of queued data in place so we
     * don't underrun right after the rewind. */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may have rewound fewer frames than we asked for */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= out_frames * u->frame_size;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    /* Nothing (or nothing more) was rewound: complete the request
     * with 0 bytes so the sink core can continue. */
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}
1254
/* The real-time I/O thread: repeatedly renders audio into the ALSA
 * buffer (via mmap or plain write), updates the latency smoother,
 * programs the wakeup timer when timer-based scheduling is in use,
 * sleeps in the rtpoll loop, and recovers from PCM errors. Runs
 * until pa_rtpoll_run() signals a regular shutdown or an
 * unrecoverable error occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Service any pending rewind request before writing */
            if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write: actually kick off the PCM
                 * stream and (re)start the timing smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec(), TRUE);
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means a regular shutdown was requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                /* Error/hangup condition on the PCM fds: attempt
                 * recovery and restart playback from scratch */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1388
1389 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1390 const char *n;
1391 char *t;
1392
1393 pa_assert(data);
1394 pa_assert(ma);
1395 pa_assert(device_name);
1396
1397 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1398 pa_sink_new_data_set_name(data, n);
1399 data->namereg_fail = TRUE;
1400 return;
1401 }
1402
1403 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1404 data->namereg_fail = TRUE;
1405 else {
1406 n = device_id ? device_id : device_name;
1407 data->namereg_fail = FALSE;
1408 }
1409
1410 t = pa_sprintf_malloc("alsa_output.%s", n);
1411 pa_sink_new_data_set_name(data, t);
1412 pa_xfree(t);
1413 }
1414
/* Probe the mixer element and wire up hardware volume/mute control on
 * the sink where the hardware supports it, falling back to software
 * control otherwise. Also starts mixer event monitoring.
 *
 * ignore_dB: skip the dB range probe and use raw volume steps only.
 * Returns 0 on success, -1 if fd monitoring could not be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer at all: software volume/mute only, nothing to do */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        /* A usable hardware volume needs a sane raw range first */
        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            /* Optionally probe the dB range too, for true decibel
             * volume control */
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    /* If the dB range tops out above 0 dB, make that
                     * maximum the sink's base volume */
                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* Fewer than 4 raw steps and no dB data is too coarse to
             * be useful as hardware volume */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            /* Per-channel control only if the mixer channels can be
             * mapped onto the sink's channel map */
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds from the main loop so external volume
     * changes are picked up */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1499
/* Create and initialize an ALSA sink: parse module arguments, open
 * the PCM device (by profile, by device id, or by device string),
 * create the pa_sink object, configure buffer metrics and latency,
 * set up the mixer, start the I/O thread, and apply/read the initial
 * volume and mute state.
 *
 * Returns the new sink, or NULL on failure (all partially-created
 * state is torn down via userdata_free()). */
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
    pa_sink_new_data data;

    pa_assert(m);
    pa_assert(ma);

    /* Sample spec / channel map: core defaults, overridable per module */
    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    /* Remember what was requested; ALSA may tweak ss when opening */
    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    /* Buffer metric defaults, overridable via module arguments */
    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    /* Timer-based scheduling needs hrtimers to be usable at all */
    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

    u->smoother = pa_smoother_new(
            DEFAULT_TSCHED_BUFFER_USEC*2,
            DEFAULT_TSCHED_BUFFER_USEC*2,
            TRUE,
            TRUE,
            5,
            pa_rtclock_usec(),
            TRUE);

    /* Register a device reservation so other users of the device can
     * coordinate with us */
    if (reserve_init(u, pa_modargs_get_value(
                             ma, "device_id",
                             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE))) < 0)
        goto fail;

    /* b/d report back whether mmap/tsched were actually possible */
    b = use_mmap;
    d = use_tsched;

    if (profile) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, profile)))

            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, &profile)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (profile)
        pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);

    /* Honor what the open routine could actually deliver */
    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (use_tsched && !pa_alsa_pcm_is_hw(u->pcm_handle)) {
        pa_log_info("Device is not a hardware device, disabling timer-based scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem, pa_modargs_get_value(ma, "control", NULL), profile);

    /* Build the sink object with name, proplist metadata and flags */
    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name);
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle, u->mixer_elem);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (profile) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
    }

    pa_alsa_init_description(data.proplist);

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY|(u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0));
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state = sink_set_state_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    /* Final buffer geometry, derived from what ALSA actually gave us */
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    pa_sink_set_max_rewind(u->sink, u->hwbuf_size);

    if (u->use_tsched) {
        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        /* With timer-based scheduling latency is adjustable at runtime */
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        u->sink->fixed_latency = pa_bytes_to_usec(u->hwbuf_size, &ss);

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    return u->sink;

fail:

    userdata_free(u);

    return NULL;
}
1777
/* Tear down everything pa_alsa_sink_new() built up, in the reverse
 * order of construction: unlink the sink first so no new work
 * arrives, then stop the I/O thread, then release the remaining
 * resources. Safe to call on a partially-initialized userdata (all
 * members are NULL-checked). u may be NULL per the fail path in
 * pa_alsa_sink_new() -- the pa_assert guards that in debug builds. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core before shutting down the thread */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously stop the I/O thread */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop any queued audio before closing the PCM device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1822
1823 void pa_alsa_sink_free(pa_sink *s) {
1824 struct userdata *u;
1825
1826 pa_sink_assert_ref(s);
1827 pa_assert_se(u = s->userdata);
1828
1829 userdata_free(u);
1830 }