]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
Merge commit 'coling/master'
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38 #include <pulse/i18n.h>
39
40 #include <pulsecore/core.h>
41 #include <pulsecore/module.h>
42 #include <pulsecore/memchunk.h>
43 #include <pulsecore/sink.h>
44 #include <pulsecore/modargs.h>
45 #include <pulsecore/core-util.h>
46 #include <pulsecore/sample-util.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/thread.h>
50 #include <pulsecore/core-error.h>
51 #include <pulsecore/thread-mq.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/rtclock.h>
54 #include <pulsecore/time-smoother.h>
55
56 #include <modules/reserve-wrap.h>
57
58 #include "alsa-util.h"
59 #include "alsa-sink.h"
60
61 /* #define DEBUG_TIMING */
62
#define DEFAULT_DEVICE "default"

/* Timer-based-scheduling (tsched) tunables, all in microseconds. */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms -- Fill up when only this much is left in the buffer */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms -- On underrun, increase watermark by this */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms -- Wakeup at least this long before the buffer runs empty */
69
/* Per-sink instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* NULL while suspended (see suspend()/unsuspend()) */

    /* Hardware mixer (volume/mute) state */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;
    long hw_dB_max, hw_dB_min;          /* in 1/100 dB, as used by the snd_mixer_selem_*_dB() calls below */
    pa_bool_t hw_dB_supported:1;
    pa_bool_t mixer_seperate_channels:1; /* TRUE: program each channel individually */
    pa_cvolume hardware_volume;         /* last volume read from / written to the hardware */

    /* All of the following sizes are in bytes */
    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_watermark,
        hwbuf_unused,                   /* part of the hw buffer we deliberately leave unused */
        min_sleep,
        min_wakeup,
        watermark_step;

    unsigned nfragments;
    pa_memchunk memchunk;               /* pending data for the non-mmap (unix_write) path */

    char *device_name;

    pa_bool_t use_mmap:1, use_tsched:1;

    pa_bool_t first, after_rewind;      /* suppress underrun handling right after start/rewind */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation (see update_smoother()/sink_get_latency()) */
    pa_smoother *smoother;
    uint64_t write_count;               /* total bytes written to the device */
    uint64_t since_start;               /* bytes written since last start/recovery */

    /* Device reservation, via modules/reserve-wrap.h */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
};
120
121 static void userdata_free(struct userdata *u);
122
123 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
124 pa_assert(r);
125 pa_assert(u);
126
127 if (pa_sink_suspend(u->sink, TRUE) < 0)
128 return PA_HOOK_CANCEL;
129
130 return PA_HOOK_OK;
131 }
132
133 static void reserve_done(struct userdata *u) {
134 pa_assert(u);
135
136 if (u->reserve_slot) {
137 pa_hook_slot_free(u->reserve_slot);
138 u->reserve_slot = NULL;
139 }
140
141 if (u->reserve) {
142 pa_reserve_wrapper_unref(u->reserve);
143 u->reserve = NULL;
144 }
145 }
146
147 static void reserve_update(struct userdata *u) {
148 const char *description;
149 pa_assert(u);
150
151 if (!u->sink || !u->reserve)
152 return;
153
154 if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
155 pa_reserve_wrapper_set_application_device_name(u->reserve, description);
156 }
157
158 static int reserve_init(struct userdata *u, const char *dname) {
159 char *rname;
160
161 pa_assert(u);
162 pa_assert(dname);
163
164 if (u->reserve)
165 return 0;
166
167 /* We are resuming, try to lock the device */
168 if (!(rname = pa_alsa_get_reserve_name(dname)))
169 return 0;
170
171 u->reserve = pa_reserve_wrapper_get(u->core, rname);
172 pa_xfree(rname);
173
174 if (!(u->reserve))
175 return -1;
176
177 reserve_update(u);
178
179 pa_assert(!u->reserve_slot);
180 u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
181
182 return 0;
183 }
184
185 static void fix_min_sleep_wakeup(struct userdata *u) {
186 size_t max_use, max_use_2;
187
188 pa_assert(u);
189
190 max_use = u->hwbuf_size - u->hwbuf_unused;
191 max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
192
193 u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
194 u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
195
196 u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
197 u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
198 }
199
200 static void fix_tsched_watermark(struct userdata *u) {
201 size_t max_use;
202 pa_assert(u);
203
204 max_use = u->hwbuf_size - u->hwbuf_unused;
205
206 if (u->tsched_watermark > max_use - u->min_sleep)
207 u->tsched_watermark = max_use - u->min_sleep;
208
209 if (u->tsched_watermark < u->min_wakeup)
210 u->tsched_watermark = u->min_wakeup;
211 }
212
213 static void adjust_after_underrun(struct userdata *u) {
214 size_t old_watermark;
215 pa_usec_t old_min_latency, new_min_latency;
216
217 pa_assert(u);
218 pa_assert(u->use_tsched);
219
220 /* First, just try to increase the watermark */
221 old_watermark = u->tsched_watermark;
222 u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
223 fix_tsched_watermark(u);
224
225 if (old_watermark != u->tsched_watermark) {
226 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
227 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
228 return;
229 }
230
231 /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
232 old_min_latency = u->sink->thread_info.min_latency;
233 new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
234 new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
235
236 if (old_min_latency != new_min_latency) {
237 pa_log_notice("Increasing minimal latency to %0.2f ms",
238 (double) new_min_latency / PA_USEC_PER_MSEC);
239
240 pa_sink_update_latency_range(u->sink, new_min_latency, u->sink->thread_info.max_latency);
241 return;
242 }
243
244 /* When we reach this we're officialy fucked! */
245 }
246
247 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
248 pa_usec_t usec, wm;
249
250 pa_assert(sleep_usec);
251 pa_assert(process_usec);
252
253 pa_assert(u);
254
255 usec = pa_sink_get_requested_latency_within_thread(u->sink);
256
257 if (usec == (pa_usec_t) -1)
258 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
259
260 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
261
262 if (wm > usec)
263 wm = usec/2;
264
265 *sleep_usec = usec - wm;
266 *process_usec = wm;
267
268 #ifdef DEBUG_TIMING
269 pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
270 (unsigned long) (usec / PA_USEC_PER_MSEC),
271 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
272 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
273 #endif
274 }
275
276 static int try_recover(struct userdata *u, const char *call, int err) {
277 pa_assert(u);
278 pa_assert(call);
279 pa_assert(err < 0);
280
281 pa_log_debug("%s: %s", call, snd_strerror(err));
282
283 pa_assert(err != -EAGAIN);
284
285 if (err == -EPIPE)
286 pa_log_debug("%s: Buffer underrun!", call);
287
288 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
289 pa_log("%s: %s", call, snd_strerror(err));
290 return -1;
291 }
292
293 u->first = TRUE;
294 u->since_start = 0;
295 return 0;
296 }
297
298 static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
299 size_t left_to_play;
300
301 /* We use <= instead of < for this check here because an underrun
302 * only happens after the last sample was processed, not already when
303 * it is removed from the buffer. This is particularly important
304 * when block transfer is used. */
305
306 if (n_bytes <= u->hwbuf_size) {
307 left_to_play = u->hwbuf_size - n_bytes;
308
309 #ifdef DEBUG_TIMING
310 pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
311 #endif
312
313 } else {
314 left_to_play = 0;
315
316 #ifdef DEBUG_TIMING
317 PA_DEBUG_TRAP;
318 #endif
319
320 if (!u->first && !u->after_rewind) {
321
322 if (pa_log_ratelimit())
323 pa_log_info("Underrun!");
324
325 if (u->use_tsched)
326 adjust_after_underrun(u);
327 }
328 }
329
330 return left_to_play;
331 }
332
333 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
334 pa_bool_t work_done = TRUE;
335 pa_usec_t max_sleep_usec = 0, process_usec = 0;
336 size_t left_to_play;
337 unsigned j = 0;
338
339 pa_assert(u);
340 pa_sink_assert_ref(u->sink);
341
342 if (u->use_tsched)
343 hw_sleep_time(u, &max_sleep_usec, &process_usec);
344
345 for (;;) {
346 snd_pcm_sframes_t n;
347 size_t n_bytes;
348 int r;
349
350 /* First we determine how many samples are missing to fill the
351 * buffer up to 100% */
352
353 if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
354
355 if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
356 continue;
357
358 return r;
359 }
360
361 n_bytes = (size_t) n * u->frame_size;
362
363 #ifdef DEBUG_TIMING
364 pa_log_debug("avail: %lu", (unsigned long) n_bytes);
365 #endif
366
367 left_to_play = check_left_to_play(u, n_bytes);
368
369 if (u->use_tsched)
370
371 /* We won't fill up the playback buffer before at least
372 * half the sleep time is over because otherwise we might
373 * ask for more data from the clients then they expect. We
374 * need to guarantee that clients only have to keep around
375 * a single hw buffer length. */
376
377 if (!polled &&
378 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
379 #ifdef DEBUG_TIMING
380 pa_log_debug("Not filling up, because too early.");
381 #endif
382 break;
383 }
384
385 if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
386
387 if (polled)
388 PA_ONCE_BEGIN {
389 char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
390 pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
391 "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
392 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
393 pa_strnull(dn));
394 pa_xfree(dn);
395 } PA_ONCE_END;
396
397 #ifdef DEBUG_TIMING
398 pa_log_debug("Not filling up, because not necessary.");
399 #endif
400 break;
401 }
402
403
404 if (++j > 10) {
405 #ifdef DEBUG_TIMING
406 pa_log_debug("Not filling up, because already too many iterations.");
407 #endif
408
409 break;
410 }
411
412 n_bytes -= u->hwbuf_unused;
413 polled = FALSE;
414
415 #ifdef DEBUG_TIMING
416 pa_log_debug("Filling up");
417 #endif
418
419 for (;;) {
420 pa_memchunk chunk;
421 void *p;
422 int err;
423 const snd_pcm_channel_area_t *areas;
424 snd_pcm_uframes_t offset, frames;
425 snd_pcm_sframes_t sframes;
426
427 frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
428 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
429
430 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
431
432 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
433 continue;
434
435 return r;
436 }
437
438 /* Make sure that if these memblocks need to be copied they will fit into one slot */
439 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
440 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
441
442 /* Check these are multiples of 8 bit */
443 pa_assert((areas[0].first & 7) == 0);
444 pa_assert((areas[0].step & 7)== 0);
445
446 /* We assume a single interleaved memory buffer */
447 pa_assert((areas[0].first >> 3) == 0);
448 pa_assert((areas[0].step >> 3) == u->frame_size);
449
450 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
451
452 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
453 chunk.length = pa_memblock_get_length(chunk.memblock);
454 chunk.index = 0;
455
456 pa_sink_render_into_full(u->sink, &chunk);
457 pa_memblock_unref_fixed(chunk.memblock);
458
459 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
460
461 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
462 continue;
463
464 return r;
465 }
466
467 work_done = TRUE;
468
469 u->write_count += frames * u->frame_size;
470 u->since_start += frames * u->frame_size;
471
472 #ifdef DEBUG_TIMING
473 pa_log_debug("Wrote %lu bytes", (unsigned long) (frames * u->frame_size));
474 #endif
475
476 if ((size_t) frames * u->frame_size >= n_bytes)
477 break;
478
479 n_bytes -= (size_t) frames * u->frame_size;
480 }
481 }
482
483 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
484 return work_done ? 1 : 0;
485 }
486
/* Fill the playback buffer using the plain snd_pcm_writei() (non-mmap)
 * code path: render audio from the sink into u->memchunk and write it to
 * the PCM device until the buffer is sufficiently full.
 *
 * Returns 1 if any data was written, 0 if not, and a negative value on
 * unrecoverable error. *sleep_usec receives how long the caller may sleep
 * before the next iteration. 'polled' tells us we got here because poll()
 * signalled POLLOUT. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;

        /* How much room does the hardware buffer have right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_play = check_left_to_play(u, n_bytes);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            /* POLLOUT woke us but there is no room to write -- a driver
             * bug; warn once per process. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Safety valve against looping forever in this function. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        /* Never touch the part of the buffer we deliberately keep empty. */
        n_bytes -= u->hwbuf_unused;
        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one is used up. */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Consume the written part of the chunk; free it when empty. */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = TRUE;

            u->write_count += frames * u->frame_size;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done ? 1 : 0;
}
608
/* Feed a new (system time, playback position) pair into the time smoother
 * so that latency queries can interpolate between hardware updates. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Prefer the driver-provided high-resolution timestamp when we can get one. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", snd_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Playback position in bytes = bytes written minus what is still
     * queued in the hardware ('delay' is in frames). */
    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_usec();

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);
}
649
650 static pa_usec_t sink_get_latency(struct userdata *u) {
651 pa_usec_t r;
652 int64_t delay;
653 pa_usec_t now1, now2;
654
655 pa_assert(u);
656
657 now1 = pa_rtclock_usec();
658 now2 = pa_smoother_get(u->smoother, now1);
659
660 delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
661
662 r = delay >= 0 ? (pa_usec_t) delay : 0;
663
664 if (u->memchunk.memblock)
665 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
666
667 return r;
668 }
669
670 static int build_pollfd(struct userdata *u) {
671 pa_assert(u);
672 pa_assert(u->pcm_handle);
673
674 if (u->alsa_rtpoll_item)
675 pa_rtpoll_item_free(u->alsa_rtpoll_item);
676
677 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
678 return -1;
679
680 return 0;
681 }
682
683 /* Called from IO context */
684 static int suspend(struct userdata *u) {
685 pa_assert(u);
686 pa_assert(u->pcm_handle);
687
688 pa_smoother_pause(u->smoother, pa_rtclock_usec());
689
690 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
691 * take awfully long with our long buffer sizes today. */
692 snd_pcm_close(u->pcm_handle);
693 u->pcm_handle = NULL;
694
695 if (u->alsa_rtpoll_item) {
696 pa_rtpoll_item_free(u->alsa_rtpoll_item);
697 u->alsa_rtpoll_item = NULL;
698 }
699
700 pa_log_info("Device suspended...");
701
702 return 0;
703 }
704
/* Called from IO context: recompute hwbuf_unused and avail_min from the
 * currently requested latency and push the new software parameters to
 * ALSA. Returns 0 on success, a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        /* The sleep/wakeup minimums and the watermark depend on hwbuf_unused. */
        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer-based scheduling, push avail_min out so poll() does
         * not wake us before the planned sleep time has passed. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused);

    return 0;
}
760
761 /* Called from IO context */
762 static int unsuspend(struct userdata *u) {
763 pa_sample_spec ss;
764 int err;
765 pa_bool_t b, d;
766 unsigned nfrags;
767 snd_pcm_uframes_t period_size;
768
769 pa_assert(u);
770 pa_assert(!u->pcm_handle);
771
772 pa_log_info("Trying resume...");
773
774 snd_config_update_free_global();
775 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
776 /*SND_PCM_NONBLOCK|*/
777 SND_PCM_NO_AUTO_RESAMPLE|
778 SND_PCM_NO_AUTO_CHANNELS|
779 SND_PCM_NO_AUTO_FORMAT)) < 0) {
780 pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
781 goto fail;
782 }
783
784 ss = u->sink->sample_spec;
785 nfrags = u->nfragments;
786 period_size = u->fragment_size / u->frame_size;
787 b = u->use_mmap;
788 d = u->use_tsched;
789
790 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
791 pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
792 goto fail;
793 }
794
795 if (b != u->use_mmap || d != u->use_tsched) {
796 pa_log_warn("Resume failed, couldn't get original access mode.");
797 goto fail;
798 }
799
800 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
801 pa_log_warn("Resume failed, couldn't restore original sample settings.");
802 goto fail;
803 }
804
805 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
806 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
807 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
808 (unsigned long) nfrags, period_size * u->frame_size);
809 goto fail;
810 }
811
812 if (update_sw_params(u) < 0)
813 goto fail;
814
815 if (build_pollfd(u) < 0)
816 goto fail;
817
818 u->first = TRUE;
819 u->since_start = 0;
820
821 pa_log_info("Resumed successfully...");
822
823 return 0;
824
825 fail:
826 if (u->pcm_handle) {
827 snd_pcm_close(u->pcm_handle);
828 u->pcm_handle = NULL;
829 }
830
831 return -1;
832 }
833
/* Called from IO context: message dispatcher for the sink's IO thread.
 * Handles latency queries and state transitions, then delegates everything
 * else to the generic pa_sink implementation. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended (no PCM handle) we report zero latency. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up device polling. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen and reconfigure. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states. */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
889
890 /* Called from main context */
891 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t new_state) {
892 pa_sink_state_t old_state;
893 struct userdata *u;
894
895 pa_sink_assert_ref(s);
896 pa_assert_se(u = s->userdata);
897
898 old_state = pa_sink_get_state(u->sink);
899
900 if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
901 reserve_done(u);
902 else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
903 if (reserve_init(u, u->device_name) < 0)
904 return -1;
905
906 return 0;
907 }
908
909 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
910 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
911
912 pa_assert(u);
913 pa_assert(u->mixer_handle);
914
915 if (mask == SND_CTL_EVENT_MASK_REMOVE)
916 return 0;
917
918 if (mask & SND_CTL_EVENT_MASK_VALUE) {
919 pa_sink_get_volume(u->sink, TRUE);
920 pa_sink_get_mute(u->sink, TRUE);
921 }
922
923 return 0;
924 }
925
926 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
927
928 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
929 (double) (u->hw_volume_max - u->hw_volume_min));
930 }
931
932 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
933 long alsa_vol;
934
935 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
936 / PA_VOLUME_NORM) + u->hw_volume_min;
937
938 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
939 }
940
/* Read the current hardware volume from the mixer into s->virtual_volume.
 * Uses the dB interface when available (values are in 1/100 dB), otherwise
 * the raw volume range; reads per-channel when the element supports it,
 * otherwise treats the element as mono and spreads the value over all
 * channels. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* dB values are relative to hw_dB_max and in 1/100 dB. */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
1020
/* Write s->virtual_volume to the hardware mixer and read back what the
 * hardware actually accepted. With dB support, the residual between the
 * requested and the achieved volume is compensated in software
 * (s->soft_volume); without it, s->virtual_volume is adjusted to the
 * achieved value instead. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to 1/100 dB relative to hw_dB_max, clamped to
                 * the hardware's dB range. */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back what the hardware actually set. */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Mono element: program the loudest requested channel. */
        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
1131
1132 static void sink_get_mute_cb(pa_sink *s) {
1133 struct userdata *u = s->userdata;
1134 int err, sw;
1135
1136 pa_assert(u);
1137 pa_assert(u->mixer_elem);
1138
1139 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
1140 pa_log_error("Unable to get switch: %s", snd_strerror(err));
1141 return;
1142 }
1143
1144 s->muted = !sw;
1145 }
1146
1147 static void sink_set_mute_cb(pa_sink *s) {
1148 struct userdata *u = s->userdata;
1149 int err;
1150
1151 pa_assert(u);
1152 pa_assert(u->mixer_elem);
1153
1154 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1155 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1156 return;
1157 }
1158 }
1159
1160 static void sink_update_requested_latency_cb(pa_sink *s) {
1161 struct userdata *u = s->userdata;
1162 size_t before;
1163 pa_assert(u);
1164
1165 if (!u->pcm_handle)
1166 return;
1167
1168 before = u->hwbuf_unused;
1169 update_sw_params(u);
1170
1171 /* Let's check whether we now use only a smaller part of the
1172 buffer then before. If so, we need to make sure that subsequent
1173 rewinds are relative to the new maxium fill level and not to the
1174 current fill level. Thus, let's do a full rewind once, to clear
1175 things up. */
1176
1177 if (u->hwbuf_unused > before) {
1178 pa_log_debug("Requesting rewind due to latency change.");
1179 pa_sink_request_rewind(s, (size_t) -1);
1180 }
1181 }
1182
1183 static int process_rewind(struct userdata *u) {
1184 snd_pcm_sframes_t unused;
1185 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1186 pa_assert(u);
1187
1188 /* Figure out how much we shall rewind and reset the counter */
1189 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1190 u->sink->thread_info.rewind_nbytes = 0;
1191
1192 if (rewind_nbytes <= 0)
1193 goto finish;
1194
1195 pa_assert(rewind_nbytes > 0);
1196 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1197
1198 snd_pcm_hwsync(u->pcm_handle);
1199 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
1200 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
1201 return -1;
1202 }
1203
1204 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1205
1206 if (u->hwbuf_size > unused_nbytes)
1207 limit_nbytes = u->hwbuf_size - unused_nbytes;
1208 else
1209 limit_nbytes = 0;
1210
1211 if (rewind_nbytes > limit_nbytes)
1212 rewind_nbytes = limit_nbytes;
1213
1214 if (rewind_nbytes > 0) {
1215 snd_pcm_sframes_t in_frames, out_frames;
1216
1217 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1218
1219 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1220 pa_log_debug("before: %lu", (unsigned long) in_frames);
1221 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1222 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
1223 return -1;
1224 }
1225 pa_log_debug("after: %lu", (unsigned long) out_frames);
1226
1227 rewind_nbytes = (size_t) out_frames * u->frame_size;
1228
1229 if (rewind_nbytes <= 0)
1230 pa_log_info("Tried rewind, but was apparently not possible.");
1231 else {
1232 u->write_count -= out_frames * u->frame_size;
1233 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1234 pa_sink_process_rewind(u->sink, rewind_nbytes);
1235
1236 u->after_rewind = TRUE;
1237 return 0;
1238 }
1239 } else
1240 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1241
1242 finish:
1243
1244 pa_sink_process_rewind(u->sink, 0);
1245
1246 return 0;
1247
1248 }
1249
/* The IO thread: renders audio into the ALSA buffer, programs its own
 * wakeup timer (in timer-based scheduling mode), sleeps in the rtpoll
 * loop and handles poll events and error recovery for the PCM device.
 * Runs until pa_rtpoll_run() returns 0 (clean shutdown) or an
 * unrecoverable error occurs, in which case the module is unloaded. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Service any pending rewind before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write since (re)start: kick off the hardware
                 * and resume the clock smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means the message loop asked us to quit cleanly */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Anything besides POLLOUT signals an error condition
             * (e.g. XRUN, suspend): try to recover and restart playback */
            if (revents & ~POLLOUT) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1383
1384 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1385 const char *n;
1386 char *t;
1387
1388 pa_assert(data);
1389 pa_assert(ma);
1390 pa_assert(device_name);
1391
1392 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1393 pa_sink_new_data_set_name(data, n);
1394 data->namereg_fail = TRUE;
1395 return;
1396 }
1397
1398 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1399 data->namereg_fail = TRUE;
1400 else {
1401 n = device_id ? device_id : device_name;
1402 data->namereg_fail = FALSE;
1403 }
1404
1405 t = pa_sprintf_malloc("alsa_output.%s", n);
1406 pa_sink_new_data_set_name(data, t);
1407 pa_xfree(t);
1408 }
1409
/* Probe the mixer element (if one was found) and hook up hardware
 * volume and mute control on the sink, falling back to software
 * control when the hardware controls are unusable. Returns 0 on
 * success (including the no-mixer case), -1 when mixer event
 * monitoring cannot be set up. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_assert(u);

    /* No mixer for this device: pure software control, not an error */
    if (!u->mixer_handle)
        return 0;

    pa_assert(u->mixer_elem);

    if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
        pa_bool_t suitable = FALSE;

        if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
            pa_log_info("Failed to get volume range. Falling back to software volume control.");
        else if (u->hw_volume_min >= u->hw_volume_max)
            pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
        else {
            pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
            suitable = TRUE;
        }

        if (suitable) {
            /* Prefer a dB scale when available, unless explicitly ignored */
            if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information or data is ignored.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                /* Keep valgrind quiet about alsa-lib's output parameters */
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                /* ALSA dB values are expressed in 1/100 dB */
                if (u->hw_dB_min >= u->hw_dB_max)
                    pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                else {
                    pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                    u->hw_dB_supported = TRUE;

                    /* If the scale goes above 0 dB, anchor the base volume there */
                    if (u->hw_dB_max > 0) {
                        u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
                        pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
                    } else
                        pa_log_info("No particular base volume set, fixing to 0 dB");
                }
            }

            /* Fewer than 4 discrete steps is too coarse to be useful */
            if (!u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }
        }

        if (suitable) {
            u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &u->sink->channel_map, u->mixer_map, TRUE) >= 0;

            u->sink->get_volume = sink_get_volume_cb;
            u->sink->set_volume = sink_set_volume_cb;
            u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
            pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            if (!u->hw_dB_supported)
                u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
        } else
            pa_log_info("Using software volume control.");
    }

    if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
        u->sink->get_mute = sink_get_mute_cb;
        u->sink->set_mute = sink_set_mute_cb;
        u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
    } else
        pa_log_info("Using software mute control.");

    /* Watch the mixer fds in the main loop so that volume changes made
     * outside PulseAudio (e.g. alsamixer) are picked up */
    u->mixer_fdl = pa_alsa_fdlist_new();

    if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
        pa_log("Failed to initialize file descriptor monitoring");
        return -1;
    }

    snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
    snd_mixer_elem_set_callback_private(u->mixer_elem, u);

    return 0;
}
1494
1495 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {
1496
1497 struct userdata *u = NULL;
1498 const char *dev_id = NULL;
1499 pa_sample_spec ss, requested_ss;
1500 pa_channel_map map;
1501 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1502 snd_pcm_uframes_t period_frames, tsched_frames;
1503 size_t frame_size;
1504 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1505 pa_usec_t usec;
1506 pa_sink_new_data data;
1507
1508 pa_assert(m);
1509 pa_assert(ma);
1510
1511 ss = m->core->default_sample_spec;
1512 map = m->core->default_channel_map;
1513 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1514 pa_log("Failed to parse sample specification and channel map");
1515 goto fail;
1516 }
1517
1518 requested_ss = ss;
1519 frame_size = pa_frame_size(&ss);
1520
1521 nfrags = m->core->default_n_fragments;
1522 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1523 if (frag_size <= 0)
1524 frag_size = (uint32_t) frame_size;
1525 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1526 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1527
1528 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1529 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1530 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1531 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1532 pa_log("Failed to parse buffer metrics");
1533 goto fail;
1534 }
1535
1536 hwbuf_size = frag_size * nfrags;
1537 period_frames = frag_size/frame_size;
1538 tsched_frames = tsched_size/frame_size;
1539
1540 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1541 pa_log("Failed to parse mmap argument.");
1542 goto fail;
1543 }
1544
1545 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1546 pa_log("Failed to parse tsched argument.");
1547 goto fail;
1548 }
1549
1550 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1551 pa_log("Failed to parse ignore_dB argument.");
1552 goto fail;
1553 }
1554
1555 if (use_tsched && !pa_rtclock_hrtimer()) {
1556 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1557 use_tsched = FALSE;
1558 }
1559
1560 u = pa_xnew0(struct userdata, 1);
1561 u->core = m->core;
1562 u->module = m;
1563 u->use_mmap = use_mmap;
1564 u->use_tsched = use_tsched;
1565 u->first = TRUE;
1566 u->rtpoll = pa_rtpoll_new();
1567 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1568
1569 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1570 usec = pa_rtclock_usec();
1571 pa_smoother_set_time_offset(u->smoother, usec);
1572 pa_smoother_pause(u->smoother, usec);
1573
1574 if (reserve_init(u, pa_modargs_get_value(
1575 ma, "device_id",
1576 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE))) < 0)
1577 goto fail;
1578
1579 b = use_mmap;
1580 d = use_tsched;
1581
1582 if (profile) {
1583
1584 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1585 pa_log("device_id= not set");
1586 goto fail;
1587 }
1588
1589 if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
1590 dev_id,
1591 &u->device_name,
1592 &ss, &map,
1593 SND_PCM_STREAM_PLAYBACK,
1594 &nfrags, &period_frames, tsched_frames,
1595 &b, &d, profile)))
1596
1597 goto fail;
1598
1599 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1600
1601 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1602 dev_id,
1603 &u->device_name,
1604 &ss, &map,
1605 SND_PCM_STREAM_PLAYBACK,
1606 &nfrags, &period_frames, tsched_frames,
1607 &b, &d, &profile)))
1608
1609 goto fail;
1610
1611 } else {
1612
1613 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1614 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1615 &u->device_name,
1616 &ss, &map,
1617 SND_PCM_STREAM_PLAYBACK,
1618 &nfrags, &period_frames, tsched_frames,
1619 &b, &d, FALSE)))
1620 goto fail;
1621
1622 }
1623
1624 pa_assert(u->device_name);
1625 pa_log_info("Successfully opened device %s.", u->device_name);
1626
1627 if (profile)
1628 pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);
1629
1630 if (use_mmap && !b) {
1631 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1632 u->use_mmap = use_mmap = FALSE;
1633 }
1634
1635 if (use_tsched && (!b || !d)) {
1636 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1637 u->use_tsched = use_tsched = FALSE;
1638 }
1639
1640 if (u->use_mmap)
1641 pa_log_info("Successfully enabled mmap() mode.");
1642
1643 if (u->use_tsched)
1644 pa_log_info("Successfully enabled timer-based scheduling mode.");
1645
1646 /* ALSA might tweak the sample spec, so recalculate the frame size */
1647 frame_size = pa_frame_size(&ss);
1648
1649 pa_alsa_find_mixer_and_elem(u->pcm_handle, &u->mixer_handle, &u->mixer_elem);
1650
1651 pa_sink_new_data_init(&data);
1652 data.driver = driver;
1653 data.module = m;
1654 data.card = card;
1655 set_sink_name(&data, ma, dev_id, u->device_name);
1656 pa_sink_new_data_set_sample_spec(&data, &ss);
1657 pa_sink_new_data_set_channel_map(&data, &map);
1658
1659 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1660 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1661 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1662 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1663 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1664
1665 if (profile) {
1666 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
1667 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
1668 }
1669
1670 pa_alsa_init_description(data.proplist);
1671
1672 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1673 pa_sink_new_data_done(&data);
1674
1675 if (!u->sink) {
1676 pa_log("Failed to create sink object");
1677 goto fail;
1678 }
1679
1680 u->sink->parent.process_msg = sink_process_msg;
1681 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1682 u->sink->set_state = sink_set_state_cb;
1683 u->sink->userdata = u;
1684
1685 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1686 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1687
1688 u->frame_size = frame_size;
1689 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1690 u->nfragments = nfrags;
1691 u->hwbuf_size = u->fragment_size * nfrags;
1692 u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
1693 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1694
1695 if (use_tsched) {
1696 fix_min_sleep_wakeup(u);
1697 fix_tsched_watermark(u);
1698
1699 u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);
1700 }
1701
1702 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1703 u->sink->thread_info.max_request = u->hwbuf_size;
1704
1705 pa_sink_set_latency_range(u->sink,
1706 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1707 pa_bytes_to_usec(u->hwbuf_size, &ss));
1708
1709 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1710 nfrags, (long unsigned) u->fragment_size,
1711 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1712
1713 if (use_tsched)
1714 pa_log_info("Time scheduling watermark is %0.2fms",
1715 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1716
1717 reserve_update(u);
1718
1719 if (update_sw_params(u) < 0)
1720 goto fail;
1721
1722 if (setup_mixer(u, ignore_dB) < 0)
1723 goto fail;
1724
1725 pa_alsa_dump(u->pcm_handle);
1726
1727 if (!(u->thread = pa_thread_new(thread_func, u))) {
1728 pa_log("Failed to create thread.");
1729 goto fail;
1730 }
1731
1732 /* Get initial mixer settings */
1733 if (data.volume_is_set) {
1734 if (u->sink->set_volume)
1735 u->sink->set_volume(u->sink);
1736 } else {
1737 if (u->sink->get_volume)
1738 u->sink->get_volume(u->sink);
1739 }
1740
1741 if (data.muted_is_set) {
1742 if (u->sink->set_mute)
1743 u->sink->set_mute(u->sink);
1744 } else {
1745 if (u->sink->get_mute)
1746 u->sink->get_mute(u->sink);
1747 }
1748
1749 pa_sink_put(u->sink);
1750
1751 return u->sink;
1752
1753 fail:
1754
1755 userdata_free(u);
1756
1757 return NULL;
1758 }
1759
/* Tear down everything in *u, in safe order: unlink the sink so no
 * new work arrives, shut down and join the IO thread, then release
 * the sink, memblocks, poll machinery, mixer, PCM handle, smoother
 * and device reservation. Tolerates a partially initialized userdata
 * (every member is checked), but u itself must be non-NULL. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Disconnect the sink from clients before stopping the thread */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to exit and wait for it to finish */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop pending samples before closing the PCM device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1804
1805 void pa_alsa_sink_free(pa_sink *s) {
1806 struct userdata *u;
1807
1808 pa_sink_assert_ref(s);
1809 pa_assert_se(u = s->userdata);
1810
1811 userdata_free(u);
1812 }