/*
 * module-alsa-sink.c — PulseAudio ALSA sink module.
 * Commit: fix suspend for alsa sink
 */
1 /* $Id$ */
2
3 /***
4 This file is part of PulseAudio.
5
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
8
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as published
11 by the Free Software Foundation; either version 2 of the License,
12 or (at your option) any later version.
13
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 You should have received a copy of the GNU Lesser General Public License
20 along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 USA.
23 ***/
24
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
28
29 #include <stdio.h>
30
31 #include <asoundlib.h>
32
33 #include <pulse/xmalloc.h>
34 #include <pulse/util.h>
35 #include <pulse/timeval.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-util.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/log.h>
45 #include <pulsecore/macro.h>
46 #include <pulsecore/thread.h>
47 #include <pulsecore/core-error.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/rtclock.h>
51 #include <pulsecore/time-smoother.h>
52
53 #include "alsa-util.h"
54 #include "module-alsa-sink-symdef.h"
55
/* Module metadata consumed by the PulseAudio module loader. */
PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE); /* may be loaded multiple times (one instance per device) */
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark> "
        "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
75
/* Module arguments accepted by pa__init(); keep in sync with PA_MODULE_USAGE. */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    "mixer_reset",
    NULL /* sentinel required by pa_modargs_new() */
};
93
/* Defaults and limits for the device and timer-based scheduling (tsched). */
#define DEFAULT_DEVICE "default"                            /* ALSA device used when neither device= nor device_id= is given */
#define DEFAULT_TSCHED_BUFFER_USEC (5*PA_USEC_PER_SEC)      /* 5s  -- default hw buffer size in tsched mode */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms -- default refill watermark in tsched mode */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)          /* 3ms -- lower bound on the sleep interval */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)         /* 3ms -- lower bound on the wakeup watermark */
99
/* Per-instance module state, shared between the main thread and the
 * real-time I/O thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* Real-time I/O thread and its message/poll infrastructure. */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;  /* NULL while the sink is suspended */

    /* ALSA mixer state for hardware volume/mute control. */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* mixer dB range, in 1/100 dB */
    pa_bool_t hw_dB_supported;          /* cleared at runtime if a dB call fails */

    /* Buffer geometry; all of these are byte counts except nfragments. */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;   /* staging chunk for the non-mmap write path */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    /* first: nothing written since (re)start; after_rewind: last operation
     * was a rewind. Both suppress the underrun diagnostics in
     * check_left_to_play(). */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Maps PA channel indexes to ALSA mixer channels. */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Smoothed sound-card clock plus counters for latency bookkeeping. */
    pa_smoother *smoother;
    int64_t frame_index;    /* frames handed to ALSA since stream start */
    uint64_t since_start;   /* bytes written since the last (re)start */

    snd_pcm_sframes_t hwbuf_unused_frames;  /* tail of the hw buffer we deliberately leave unused */
};
138
139 static void fix_tsched_watermark(struct userdata *u) {
140 size_t max_use;
141 size_t min_sleep, min_wakeup;
142 pa_assert(u);
143
144 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
145
146 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
147 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
148
149 if (min_sleep > max_use/2)
150 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
151 if (min_sleep < u->frame_size)
152 min_sleep = u->frame_size;
153
154 if (min_wakeup > max_use/2)
155 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
156 if (min_wakeup < u->frame_size)
157 min_wakeup = u->frame_size;
158
159 if (u->tsched_watermark > max_use-min_sleep)
160 u->tsched_watermark = max_use-min_sleep;
161
162 if (u->tsched_watermark < min_wakeup)
163 u->tsched_watermark = min_wakeup;
164 }
165
166 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
167 pa_usec_t usec, wm;
168
169 pa_assert(sleep_usec);
170 pa_assert(process_usec);
171
172 pa_assert(u);
173
174 usec = pa_sink_get_requested_latency_within_thread(u->sink);
175
176 if (usec == (pa_usec_t) -1)
177 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
178
179 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
180
181 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
182
183 if (usec >= wm) {
184 *sleep_usec = usec - wm;
185 *process_usec = wm;
186 } else
187 *process_usec = *sleep_usec = usec / 2;
188
189 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
190 }
191
192 static int try_recover(struct userdata *u, const char *call, int err) {
193 pa_assert(u);
194 pa_assert(call);
195 pa_assert(err < 0);
196
197 pa_log_debug("%s: %s", call, snd_strerror(err));
198
199 pa_assert(err != -EAGAIN);
200
201 if (err == -EPIPE)
202 pa_log_debug("%s: Buffer underrun!", call);
203
204 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
205 u->first = TRUE;
206 u->since_start = 0;
207 return 0;
208 }
209
210 pa_log("%s: %s", call, snd_strerror(err));
211 return -1;
212 }
213
214 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
215 size_t left_to_play;
216
217 if (n*u->frame_size < u->hwbuf_size)
218 left_to_play = u->hwbuf_size - (n*u->frame_size);
219 else
220 left_to_play = 0;
221
222 if (left_to_play > 0) {
223 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
224 } else if (!u->first && !u->after_rewind) {
225 pa_log_info("Underrun!");
226
227 if (u->use_tsched) {
228 size_t old_watermark = u->tsched_watermark;
229
230 u->tsched_watermark *= 2;
231 fix_tsched_watermark(u);
232
233 if (old_watermark != u->tsched_watermark)
234 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
235 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
236 }
237 }
238
239 return left_to_play;
240 }
241
242 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
243 int work_done = 0;
244 pa_usec_t max_sleep_usec, process_usec;
245 size_t left_to_play;
246
247 pa_assert(u);
248 pa_sink_assert_ref(u->sink);
249
250 if (u->use_tsched)
251 hw_sleep_time(u, &max_sleep_usec, &process_usec);
252
253 for (;;) {
254 snd_pcm_sframes_t n;
255 int r;
256
257 snd_pcm_hwsync(u->pcm_handle);
258
259 /* First we determine how many samples are missing to fill the
260 * buffer up to 100% */
261
262 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
263
264 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
265 continue;
266
267 return r;
268 }
269
270 left_to_play = check_left_to_play(u, n);
271
272 if (u->use_tsched)
273
274 /* We won't fill up the playback buffer before at least
275 * half the sleep time is over because otherwise we might
276 * ask for more data from the clients then they expect. We
277 * need to guarantee that clients only have to keep around
278 * a single hw buffer length. */
279
280 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > max_sleep_usec/2)
281 break;
282
283 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
284 break;
285
286 n -= u->hwbuf_unused_frames;
287
288 /* pa_log_debug("Filling up"); */
289
290 for (;;) {
291 pa_memchunk chunk;
292 void *p;
293 int err;
294 const snd_pcm_channel_area_t *areas;
295 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
296
297 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
298
299 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
300
301 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
302 continue;
303
304 return r;
305 }
306
307 /* Make sure that if these memblocks need to be copied they will fit into one slot */
308 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
309 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
310
311 /* Check these are multiples of 8 bit */
312 pa_assert((areas[0].first & 7) == 0);
313 pa_assert((areas[0].step & 7)== 0);
314
315 /* We assume a single interleaved memory buffer */
316 pa_assert((areas[0].first >> 3) == 0);
317 pa_assert((areas[0].step >> 3) == u->frame_size);
318
319 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
320
321 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
322 chunk.length = pa_memblock_get_length(chunk.memblock);
323 chunk.index = 0;
324
325 pa_sink_render_into_full(u->sink, &chunk);
326
327 /* FIXME: Maybe we can do something to keep this memory block
328 * a little bit longer around? */
329 pa_memblock_unref_fixed(chunk.memblock);
330
331 if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
332
333 if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
334 continue;
335
336 return r;
337 }
338
339 work_done = 1;
340
341 u->frame_index += frames;
342 u->since_start += frames * u->frame_size;
343
344 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
345
346 if (frames >= (snd_pcm_uframes_t) n)
347 break;
348
349 n -= frames;
350 }
351 }
352
353 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
354 return work_done;
355 }
356
357 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
358 int work_done = 0;
359 pa_usec_t max_sleep_usec, process_usec;
360 size_t left_to_play;
361
362 pa_assert(u);
363 pa_sink_assert_ref(u->sink);
364
365 if (u->use_tsched)
366 hw_sleep_time(u, &max_sleep_usec, &process_usec);
367
368 for (;;) {
369 snd_pcm_sframes_t n;
370 int r;
371
372 snd_pcm_hwsync(u->pcm_handle);
373
374 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
375
376 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
377 continue;
378
379 return r;
380 }
381
382 left_to_play = check_left_to_play(u, n);
383
384 if (u->use_tsched)
385
386 /* We won't fill up the playback buffer before at least
387 * half the sleep time is over because otherwise we might
388 * ask for more data from the clients then they expect. We
389 * need to guarantee that clients only have to keep around
390 * a single hw buffer length. */
391
392 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > max_sleep_usec/2)
393 break;
394
395 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
396 break;
397
398 n -= u->hwbuf_unused_frames;
399
400 for (;;) {
401 snd_pcm_sframes_t frames;
402 void *p;
403
404 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
405
406 if (u->memchunk.length <= 0)
407 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
408
409 pa_assert(u->memchunk.length > 0);
410
411 frames = u->memchunk.length / u->frame_size;
412
413 if (frames > n)
414 frames = n;
415
416 p = pa_memblock_acquire(u->memchunk.memblock);
417 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
418 pa_memblock_release(u->memchunk.memblock);
419
420 pa_assert(frames != 0);
421
422 if (PA_UNLIKELY(frames < 0)) {
423
424 if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)
425 continue;
426
427 return r;
428 }
429
430 u->memchunk.index += frames * u->frame_size;
431 u->memchunk.length -= frames * u->frame_size;
432
433 if (u->memchunk.length <= 0) {
434 pa_memblock_unref(u->memchunk.memblock);
435 pa_memchunk_reset(&u->memchunk);
436 }
437
438 work_done = 1;
439
440 u->frame_index += frames;
441 u->since_start += frames * u->frame_size;
442
443 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
444
445 if (frames >= n)
446 break;
447
448 n -= frames;
449 }
450 }
451
452 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
453 return work_done;
454 }
455
/* Feed a fresh (system time, card time) sample into the time smoother so
 * that sink_get_latency() can interpolate the card's playback position. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames actually played so far = frames written - frames still queued. */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
498
499 static pa_usec_t sink_get_latency(struct userdata *u) {
500 pa_usec_t r = 0;
501 int64_t delay;
502 pa_usec_t now1, now2;
503
504 pa_assert(u);
505
506 now1 = pa_rtclock_usec();
507 now2 = pa_smoother_get(u->smoother, now1);
508
509 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
510
511 if (delay > 0)
512 r = (pa_usec_t) delay;
513
514 if (u->memchunk.memblock)
515 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
516
517 return r;
518 }
519
520 static int build_pollfd(struct userdata *u) {
521 pa_assert(u);
522 pa_assert(u->pcm_handle);
523
524 if (u->alsa_rtpoll_item)
525 pa_rtpoll_item_free(u->alsa_rtpoll_item);
526
527 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
528 return -1;
529
530 return 0;
531 }
532
/* Suspend the sink: freeze the clock smoother, drain what is queued,
 * close the PCM handle (set to NULL) and drop the poll item.
 * Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze latency interpolation while the device is gone. */
    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend */
    snd_pcm_drain(u->pcm_handle);
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
553
554 static int update_sw_params(struct userdata *u) {
555 snd_pcm_uframes_t avail_min;
556 int err;
557
558 pa_assert(u);
559
560 /* Use the full buffer if noone asked us for anything specific */
561 u->hwbuf_unused_frames = 0;
562
563 if (u->use_tsched) {
564 pa_usec_t latency;
565
566 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
567 size_t b;
568
569 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
570
571 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
572
573 /* We need at least one sample in our buffer */
574
575 if (PA_UNLIKELY(b < u->frame_size))
576 b = u->frame_size;
577
578 u->hwbuf_unused_frames =
579 PA_LIKELY(b < u->hwbuf_size) ?
580 ((u->hwbuf_size - b) / u->frame_size) : 0;
581
582 fix_tsched_watermark(u);
583 }
584 }
585
586 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
587
588 /* We need at last one frame in the used part of the buffer */
589 avail_min = u->hwbuf_unused_frames + 1;
590
591 if (u->use_tsched) {
592 pa_usec_t sleep_usec, process_usec;
593
594 hw_sleep_time(u, &sleep_usec, &process_usec);
595 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
596 }
597
598 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
599
600 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
601 pa_log("Failed to set software parameters: %s", snd_strerror(err));
602 return err;
603 }
604
605 return 0;
606 }
607
/* Resume from suspend: reopen the PCM device and require that the
 * original access mode, sample spec and fragment settings can be
 * restored; otherwise fail and leave the device closed.
 * Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop ALSA's cached global configuration before reopening. */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Ask for exactly the configuration we had before suspending. */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* Verify the device gave us back what we had before. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Treat the next write like the very first one after startup. */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
675
/* Message handler running in the I/O thread context: answers latency
 * queries and performs suspend/resume on state transitions before
 * chaining up to the generic pa_sink handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency. */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling. */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ; /* nothing to do for these transitions */
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
729
730 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
731 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
732
733 pa_assert(u);
734 pa_assert(u->mixer_handle);
735
736 if (mask == SND_CTL_EVENT_MASK_REMOVE)
737 return 0;
738
739 if (mask & SND_CTL_EVENT_MASK_VALUE) {
740 pa_sink_get_volume(u->sink);
741 pa_sink_get_mute(u->sink);
742 }
743
744 return 0;
745 }
746
/* pa_sink get-volume callback: read the hardware mixer volume for every
 * channel into s->volume. Prefers the dB API; permanently falls back to
 * raw volume scaling if a dB query fails.
 * Returns 0 on success, -1 on error. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        if (u->hw_dB_supported) {

            /* ALSA reports dB in 1/100 dB units. */
            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
                s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
                continue;
            }

            /* dB query failed; use raw volumes from now on. */
            u->hw_dB_supported = FALSE;
        }

        if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
            goto fail;

        /* Map the raw mixer range linearly onto [0, PA_VOLUME_NORM]. */
        s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
783
/* pa_sink set-volume callback: push s->volume to the hardware mixer,
 * channel by channel, then read back what the hardware actually applied
 * so s->volume reflects the real levels. Prefers the dB API; permanently
 * falls back to raw volume scaling if a dB call fails.
 * Returns 0 on success, -1 on error. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;
        pa_volume_t vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        /* Hardware amplification beyond 100% is not used. */
        vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);

        if (u->hw_dB_supported) {
            /* Convert to ALSA's 1/100 dB units and clamp to the hw range. */
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {

                /* Read back the value the hardware actually chose. */
                if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
                    s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

                continue;
            }

            /* dB call failed; use raw volumes from now on. */
            u->hw_dB_supported = FALSE;

        }

        /* Map [0, PA_VOLUME_NORM] linearly onto the raw mixer range. */
        alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

        if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
            goto fail;

        /* Read back the value the hardware actually chose. */
        if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
            s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
833
834 static int sink_get_mute_cb(pa_sink *s) {
835 struct userdata *u = s->userdata;
836 int err, sw;
837
838 pa_assert(u);
839 pa_assert(u->mixer_elem);
840
841 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
842 pa_log_error("Unable to get switch: %s", snd_strerror(err));
843 return -1;
844 }
845
846 s->muted = !sw;
847
848 return 0;
849 }
850
851 static int sink_set_mute_cb(pa_sink *s) {
852 struct userdata *u = s->userdata;
853 int err;
854
855 pa_assert(u);
856 pa_assert(u->mixer_elem);
857
858 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
859 pa_log_error("Unable to set switch: %s", snd_strerror(err));
860 return -1;
861 }
862
863 return 0;
864 }
865
866 static void sink_update_requested_latency_cb(pa_sink *s) {
867 struct userdata *u = s->userdata;
868 snd_pcm_sframes_t before;
869 pa_assert(u);
870
871 if (!u->pcm_handle)
872 return;
873
874 before = u->hwbuf_unused_frames;
875 update_sw_params(u);
876
877 /* Let's check whether we now use only a smaller part of the
878 buffer then before. If so, we need to make sure that subsequent
879 rewinds are relative to the new maxium fill level and not to the
880 current fill level. Thus, let's do a full rewind once, to clear
881 things up. */
882
883 if (u->hwbuf_unused_frames > before) {
884 pa_log_debug("Requesting rewind due to latency change.");
885 pa_sink_request_rewind(s, 0);
886 }
887 }
888
/* Carry out a rewind requested via thread_info.rewind_nbytes: undo as
 * much already-queued audio as the hardware allows (always keeping the
 * watermark's worth of data queued) and inform the sink about how much
 * was actually rewound. Returns 0 on success, -1 on ALSA failure. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
        return -1;
    }

    /* Bytes we must not rewind into: the already-free part of the buffer
     * plus the watermark we want to keep queued. */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The hardware may have rewound fewer frames than requested. */
        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            /* Keep our write counter consistent with the hw pointer. */
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    return 0;
}
946
/* Entry point of the real-time I/O thread: renders audio into the ALSA
 * buffer, schedules wakeups (timer- and/or poll-driven) and reacts to
 * poll events until asked to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec;

            /* Honor any pending rewind request before writing new data. */
            if (u->sink->thread_info.rewind_nbytes > 0)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            if (work_done) {

                /* First write since (re)start: kick off playback and
                 * let the clock smoother run again. */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* A return of 0 means the thread was asked to terminate. */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* On poll errors try to recover the stream and restart
             * playback from scratch. */
            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1078
1079 int pa__init(pa_module*m) {
1080
1081 pa_modargs *ma = NULL;
1082 struct userdata *u = NULL;
1083 const char *dev_id;
1084 pa_sample_spec ss;
1085 pa_channel_map map;
1086 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1087 snd_pcm_uframes_t period_frames, tsched_frames;
1088 size_t frame_size;
1089 snd_pcm_info_t *pcm_info = NULL;
1090 int err;
1091 const char *name;
1092 char *name_buf = NULL;
1093 pa_bool_t namereg_fail;
1094 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, mixer_reset = TRUE;
1095 pa_usec_t usec;
1096 pa_sink_new_data data;
1097
1098 snd_pcm_info_alloca(&pcm_info);
1099
1100 pa_assert(m);
1101
1102 pa_alsa_redirect_errors_inc();
1103
1104 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1105 pa_log("Failed to parse module arguments");
1106 goto fail;
1107 }
1108
1109 ss = m->core->default_sample_spec;
1110 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1111 pa_log("Failed to parse sample specification and channel map");
1112 goto fail;
1113 }
1114
1115 frame_size = pa_frame_size(&ss);
1116
1117 nfrags = m->core->default_n_fragments;
1118 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1119 if (frag_size <= 0)
1120 frag_size = frame_size;
1121 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1122 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1123
1124 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1125 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1126 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1127 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1128 pa_log("Failed to parse buffer metrics");
1129 goto fail;
1130 }
1131
1132 hwbuf_size = frag_size * nfrags;
1133 period_frames = frag_size/frame_size;
1134 tsched_frames = tsched_size/frame_size;
1135
1136 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1137 pa_log("Failed to parse mmap argument.");
1138 goto fail;
1139 }
1140
1141 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1142 pa_log("Failed to parse timer_scheduling argument.");
1143 goto fail;
1144 }
1145
1146 if (use_tsched && !pa_rtclock_hrtimer()) {
1147 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1148 use_tsched = FALSE;
1149 }
1150
1151 if (pa_modargs_get_value_boolean(ma, "mixer_reset", &mixer_reset) < 0) {
1152 pa_log("Failed to parse mixer_reset argument.");
1153 goto fail;
1154 }
1155
1156 u = pa_xnew0(struct userdata, 1);
1157 u->core = m->core;
1158 u->module = m;
1159 m->userdata = u;
1160 u->use_mmap = use_mmap;
1161 u->use_tsched = use_tsched;
1162 u->first = TRUE;
1163 u->since_start = 0;
1164 u->after_rewind = FALSE;
1165 u->rtpoll = pa_rtpoll_new();
1166 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1167 u->alsa_rtpoll_item = NULL;
1168
1169 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1170 usec = pa_rtclock_usec();
1171 pa_smoother_set_time_offset(u->smoother, usec);
1172 pa_smoother_pause(u->smoother, usec);
1173
1174 snd_config_update_free_global();
1175
1176 b = use_mmap;
1177 d = use_tsched;
1178
1179 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1180
1181 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1182 dev_id,
1183 &u->device_name,
1184 &ss, &map,
1185 SND_PCM_STREAM_PLAYBACK,
1186 &nfrags, &period_frames, tsched_frames,
1187 &b, &d)))
1188
1189 goto fail;
1190
1191 } else {
1192
1193 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1194 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1195 &u->device_name,
1196 &ss, &map,
1197 SND_PCM_STREAM_PLAYBACK,
1198 &nfrags, &period_frames, tsched_frames,
1199 &b, &d)))
1200 goto fail;
1201
1202 }
1203
1204 pa_assert(u->device_name);
1205 pa_log_info("Successfully opened device %s.", u->device_name);
1206
1207 if (use_mmap && !b) {
1208 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1209 u->use_mmap = use_mmap = FALSE;
1210 }
1211
1212 if (use_tsched && (!b || !d)) {
1213 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1214 u->use_tsched = use_tsched = FALSE;
1215 }
1216
1217 if (u->use_mmap)
1218 pa_log_info("Successfully enabled mmap() mode.");
1219
1220 if (u->use_tsched)
1221 pa_log_info("Successfully enabled timer-based scheduling mode.");
1222
1223 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1224 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1225 goto fail;
1226 }
1227
1228 /* ALSA might tweak the sample spec, so recalculate the frame size */
1229 frame_size = pa_frame_size(&ss);
1230
1231 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1232 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1233 else {
1234 pa_bool_t found = FALSE;
1235
1236 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1237 found = TRUE;
1238 else {
1239 snd_pcm_info_t *info;
1240
1241 snd_pcm_info_alloca(&info);
1242
1243 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1244 char *md;
1245 int card;
1246
1247 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1248
1249 md = pa_sprintf_malloc("hw:%i", card);
1250
1251 if (strcmp(u->device_name, md))
1252 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1253 found = TRUE;
1254 pa_xfree(md);
1255 }
1256 }
1257 }
1258
1259 if (found)
1260 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1261 found = FALSE;
1262
1263 if (!found) {
1264 snd_mixer_close(u->mixer_handle);
1265 u->mixer_handle = NULL;
1266 }
1267 }
1268
1269 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1270 namereg_fail = TRUE;
1271 else {
1272 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1273 namereg_fail = FALSE;
1274 }
1275
1276 pa_sink_new_data_init(&data);
1277 data.driver = __FILE__;
1278 data.module = m;
1279 pa_sink_new_data_set_name(&data, name);
1280 data.namereg_fail = namereg_fail;
1281 pa_sink_new_data_set_sample_spec(&data, &ss);
1282 pa_sink_new_data_set_channel_map(&data, &map);
1283
1284 pa_alsa_init_proplist(data.proplist, pcm_info);
1285 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1286 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1287 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1288 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1289
1290 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1291 pa_sink_new_data_done(&data);
1292 pa_xfree(name_buf);
1293
1294 if (!u->sink) {
1295 pa_log("Failed to create sink object");
1296 goto fail;
1297 }
1298
1299 u->sink->parent.process_msg = sink_process_msg;
1300 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1301 u->sink->userdata = u;
1302
1303 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1304 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1305
1306 u->frame_size = frame_size;
1307 u->fragment_size = frag_size = period_frames * frame_size;
1308 u->nfragments = nfrags;
1309 u->hwbuf_size = u->fragment_size * nfrags;
1310 u->hwbuf_unused_frames = 0;
1311 u->tsched_watermark = tsched_watermark;
1312 u->frame_index = 0;
1313 u->hw_dB_supported = FALSE;
1314 u->hw_dB_min = u->hw_dB_max = 0;
1315 u->hw_volume_min = u->hw_volume_max = 0;
1316
1317 if (use_tsched)
1318 fix_tsched_watermark(u);
1319
1320 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1321 u->sink->max_latency = pa_bytes_to_usec(u->hwbuf_size, &ss);
1322 if (!use_tsched)
1323 u->sink->min_latency = u->sink->max_latency;
1324
1325 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1326 nfrags, (long unsigned) u->fragment_size,
1327 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1328
1329 if (use_tsched)
1330 pa_log_info("Time scheduling watermark is %0.2fms",
1331 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1332
1333 if (update_sw_params(u) < 0)
1334 goto fail;
1335
1336 pa_memchunk_reset(&u->memchunk);
1337
1338 if (u->mixer_handle) {
1339 pa_assert(u->mixer_elem);
1340
1341 if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
1342
1343 if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0 &&
1344 snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) >= 0) {
1345
1346 pa_bool_t suitable = TRUE;
1347
1348 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1349
1350 if (u->hw_volume_min > u->hw_volume_max) {
1351
1352 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u->hw_volume_min, u->hw_volume_max);
1353 suitable = FALSE;
1354
1355 } else if (u->hw_volume_max - u->hw_volume_min < 3) {
1356
1357 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1358 suitable = FALSE;
1359
1360 } else if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) >= 0) {
1361
1362 /* u->hw_dB_max = 0; u->hw_dB_min = -3000; Use this to make valgrind shut up */
1363
1364 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1365
1366 /* Let's see if this thing actually is useful for muting */
1367 if (u->hw_dB_min > -6000) {
1368 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u->hw_dB_min) / 100);
1369
1370 suitable = FALSE;
1371 } else if (u->hw_dB_max < 0) {
1372
1373 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_max) / 100);
1374 suitable = FALSE;
1375
1376 } else if (u->hw_dB_min >= u->hw_dB_max) {
1377
1378 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_min) / 100, ((double) u->hw_dB_max) / 100);
1379 suitable = FALSE;
1380
1381 } else {
1382
1383 if (u->hw_dB_max > 0) {
1384 /* dB > 0 means overamplification, and clipping, we don't want that here */
1385 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u->hw_dB_max) / 100);
1386 u->hw_dB_max = 0;
1387 }
1388
1389 u->hw_dB_supported = TRUE;
1390 }
1391 }
1392
1393 if (suitable) {
1394 u->sink->get_volume = sink_get_volume_cb;
1395 u->sink->set_volume = sink_set_volume_cb;
1396 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1397 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1398
1399 } else if (mixer_reset) {
1400 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1401 pa_alsa_0dB_playback(u->mixer_elem);
1402 } else
1403 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1404 }
1405
1406 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1407 u->sink->get_mute = sink_get_mute_cb;
1408 u->sink->set_mute = sink_set_mute_cb;
1409 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1410 }
1411
1412 u->mixer_fdl = pa_alsa_fdlist_new();
1413
1414 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1415 pa_log("Failed to initialize file descriptor monitoring");
1416 goto fail;
1417 }
1418
1419 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1420 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1421 } else
1422 u->mixer_fdl = NULL;
1423
1424 pa_alsa_dump(u->pcm_handle);
1425
1426 if (!(u->thread = pa_thread_new(thread_func, u))) {
1427 pa_log("Failed to create thread.");
1428 goto fail;
1429 }
1430
1431 /* Get initial mixer settings */
1432 if (data.volume_is_set) {
1433 if (u->sink->set_volume)
1434 u->sink->set_volume(u->sink);
1435 } else {
1436 if (u->sink->get_volume)
1437 u->sink->get_volume(u->sink);
1438 }
1439
1440 if (data.muted_is_set) {
1441 if (u->sink->set_mute)
1442 u->sink->set_mute(u->sink);
1443 } else {
1444 if (u->sink->get_mute)
1445 u->sink->get_mute(u->sink);
1446 }
1447
1448 pa_sink_put(u->sink);
1449
1450 pa_modargs_free(ma);
1451
1452 return 0;
1453
1454 fail:
1455
1456 if (ma)
1457 pa_modargs_free(ma);
1458
1459 pa__done(m);
1460
1461 return -1;
1462 }
1463
/* Module teardown: releases everything pa__init() acquired.
 *
 * Safe to call on a partially initialized module (it is invoked from
 * pa__init()'s fail path), hence the NULL checks on each resource.
 * The teardown order matters: unlink the sink first so no new data is
 * routed to it, shut down the I/O thread before freeing anything the
 * thread touches, and drop the rtpoll item before the rtpoll itself. */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    /* pa__init() may fail before userdata is allocated; still undo the
     * error-redirect refcount taken at the top of pa__init(). */
    if (!(u = m->userdata)) {
        pa_alsa_redirect_errors_dec();
        return;
    }

    /* Detach the sink from the core so clients stop using it */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously stop the real-time I/O thread before tearing down
     * the structures it operates on */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    /* Pending playback chunk, if any */
    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    /* The poll item must go before the rtpoll it belongs to */
    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop watching mixer fds before closing the mixer handle */
    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop any queued samples, then close the PCM device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}