]> code.delx.au - pulseaudio/blob - src/modules/module-alsa-sink.c
Merge dead branch 'lockfree'
[pulseaudio] / src / modules / module-alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/xmalloc.h>
32 #include <pulse/util.h>
33 #include <pulse/timeval.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/module.h>
37 #include <pulsecore/memchunk.h>
38 #include <pulsecore/sink.h>
39 #include <pulsecore/modargs.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/thread.h>
45 #include <pulsecore/core-error.h>
46 #include <pulsecore/thread-mq.h>
47 #include <pulsecore/rtpoll.h>
48 #include <pulsecore/rtclock.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include "alsa-util.h"
52 #include "module-alsa-sink-symdef.h"
53
54 PA_MODULE_AUTHOR("Lennart Poettering");
55 PA_MODULE_DESCRIPTION("ALSA Sink");
56 PA_MODULE_VERSION(PACKAGE_VERSION);
57 PA_MODULE_LOAD_ONCE(FALSE);
58 PA_MODULE_USAGE(
59 "sink_name=<name for the sink> "
60 "device=<ALSA device> "
61 "device_id=<ALSA card index> "
62 "format=<sample format> "
63 "rate=<sample rate> "
64 "channels=<number of channels> "
65 "channel_map=<channel map> "
66 "fragments=<number of fragments> "
67 "fragment_size=<fragment size> "
68 "mmap=<enable memory mapping?> "
69 "tsched=<enable system timer based scheduling mode?> "
70 "tsched_buffer_size=<buffer size when using timer based scheduling> "
71 "tsched_buffer_watermark=<lower fill watermark> "
72 "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
73
/* Module arguments accepted by pa__init(); keep this list in sync with
 * the PA_MODULE_USAGE() string above. NULL-terminated. */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    "mixer_reset",
    NULL
};
91
92 #define DEFAULT_DEVICE "default"
93 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
94 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
95 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
96 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
97
98 struct userdata {
99 pa_core *core;
100 pa_module *module;
101 pa_sink *sink;
102
103 pa_thread *thread;
104 pa_thread_mq thread_mq;
105 pa_rtpoll *rtpoll;
106
107 snd_pcm_t *pcm_handle;
108
109 pa_alsa_fdlist *mixer_fdl;
110 snd_mixer_t *mixer_handle;
111 snd_mixer_elem_t *mixer_elem;
112 long hw_volume_max, hw_volume_min;
113 long hw_dB_max, hw_dB_min;
114 pa_bool_t hw_dB_supported;
115
116 size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
117 unsigned nfragments;
118 pa_memchunk memchunk;
119
120 char *device_name;
121
122 pa_bool_t use_mmap, use_tsched;
123
124 pa_bool_t first, after_rewind;
125
126 pa_rtpoll_item *alsa_rtpoll_item;
127
128 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
129
130 pa_smoother *smoother;
131 int64_t frame_index;
132 uint64_t since_start;
133
134 snd_pcm_sframes_t hwbuf_unused_frames;
135 };
136
137 static void fix_tsched_watermark(struct userdata *u) {
138 size_t max_use;
139 size_t min_sleep, min_wakeup;
140 pa_assert(u);
141
142 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
143
144 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
145 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
146
147 if (min_sleep > max_use/2)
148 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
149 if (min_sleep < u->frame_size)
150 min_sleep = u->frame_size;
151
152 if (min_wakeup > max_use/2)
153 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
154 if (min_wakeup < u->frame_size)
155 min_wakeup = u->frame_size;
156
157 if (u->tsched_watermark > max_use-min_sleep)
158 u->tsched_watermark = max_use-min_sleep;
159
160 if (u->tsched_watermark < min_wakeup)
161 u->tsched_watermark = min_wakeup;
162 }
163
164 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
165 pa_usec_t usec, wm;
166
167 pa_assert(sleep_usec);
168 pa_assert(process_usec);
169
170 pa_assert(u);
171
172 usec = pa_sink_get_requested_latency_within_thread(u->sink);
173
174 if (usec == (pa_usec_t) -1)
175 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
176
177 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
178
179 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
180
181 if (usec >= wm) {
182 *sleep_usec = usec - wm;
183 *process_usec = wm;
184 } else
185 *process_usec = *sleep_usec = usec / 2;
186
187 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
188 }
189
190 static int try_recover(struct userdata *u, const char *call, int err) {
191 pa_assert(u);
192 pa_assert(call);
193 pa_assert(err < 0);
194
195 pa_log_debug("%s: %s", call, snd_strerror(err));
196
197 pa_assert(err != -EAGAIN);
198
199 if (err == -EPIPE)
200 pa_log_debug("%s: Buffer underrun!", call);
201
202 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
203 u->first = TRUE;
204 u->since_start = 0;
205 return 0;
206 }
207
208 pa_log("%s: %s", call, snd_strerror(err));
209 return -1;
210 }
211
212 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
213 size_t left_to_play;
214
215 if (n*u->frame_size < u->hwbuf_size)
216 left_to_play = u->hwbuf_size - (n*u->frame_size);
217 else
218 left_to_play = 0;
219
220 if (left_to_play > 0) {
221 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
222 } else if (!u->first && !u->after_rewind) {
223 pa_log_info("Underrun!");
224
225 if (u->use_tsched) {
226 size_t old_watermark = u->tsched_watermark;
227
228 u->tsched_watermark *= 2;
229 fix_tsched_watermark(u);
230
231 if (old_watermark != u->tsched_watermark)
232 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
233 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
234 }
235 }
236
237 return left_to_play;
238 }
239
240 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
241 int work_done = 0;
242 pa_usec_t max_sleep_usec, process_usec;
243 size_t left_to_play;
244
245 pa_assert(u);
246 pa_sink_assert_ref(u->sink);
247
248 if (u->use_tsched)
249 hw_sleep_time(u, &max_sleep_usec, &process_usec);
250
251 for (;;) {
252 snd_pcm_sframes_t n;
253 int r;
254
255 snd_pcm_hwsync(u->pcm_handle);
256
257 /* First we determine how many samples are missing to fill the
258 * buffer up to 100% */
259
260 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
261
262 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
263 continue;
264
265 return r;
266 }
267
268 left_to_play = check_left_to_play(u, n);
269
270 if (u->use_tsched)
271
272 /* We won't fill up the playback buffer before at least
273 * half the sleep time is over because otherwise we might
274 * ask for more data from the clients then they expect. We
275 * need to guarantee that clients only have to keep around
276 * a single hw buffer length. */
277
278 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
279 break;
280
281 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
282 break;
283
284 n -= u->hwbuf_unused_frames;
285
286 /* pa_log_debug("Filling up"); */
287
288 for (;;) {
289 pa_memchunk chunk;
290 void *p;
291 int err;
292 const snd_pcm_channel_area_t *areas;
293 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
294
295 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
296
297 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
298
299 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
300 continue;
301
302 return r;
303 }
304
305 /* Make sure that if these memblocks need to be copied they will fit into one slot */
306 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
307 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
308
309 /* Check these are multiples of 8 bit */
310 pa_assert((areas[0].first & 7) == 0);
311 pa_assert((areas[0].step & 7)== 0);
312
313 /* We assume a single interleaved memory buffer */
314 pa_assert((areas[0].first >> 3) == 0);
315 pa_assert((areas[0].step >> 3) == u->frame_size);
316
317 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
318
319 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
320 chunk.length = pa_memblock_get_length(chunk.memblock);
321 chunk.index = 0;
322
323 pa_sink_render_into_full(u->sink, &chunk);
324
325 /* FIXME: Maybe we can do something to keep this memory block
326 * a little bit longer around? */
327 pa_memblock_unref_fixed(chunk.memblock);
328
329 if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
330
331 if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
332 continue;
333
334 return r;
335 }
336
337 work_done = 1;
338
339 u->frame_index += frames;
340 u->since_start += frames * u->frame_size;
341
342 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
343
344 if (frames >= (snd_pcm_uframes_t) n)
345 break;
346
347 n -= frames;
348 }
349 }
350
351 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
352 return work_done;
353 }
354
355 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
356 int work_done = 0;
357 pa_usec_t max_sleep_usec, process_usec;
358 size_t left_to_play;
359
360 pa_assert(u);
361 pa_sink_assert_ref(u->sink);
362
363 if (u->use_tsched)
364 hw_sleep_time(u, &max_sleep_usec, &process_usec);
365
366 for (;;) {
367 snd_pcm_sframes_t n;
368 int r;
369
370 snd_pcm_hwsync(u->pcm_handle);
371
372 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
373
374 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
375 continue;
376
377 return r;
378 }
379
380 left_to_play = check_left_to_play(u, n);
381
382 if (u->use_tsched)
383
384 /* We won't fill up the playback buffer before at least
385 * half the sleep time is over because otherwise we might
386 * ask for more data from the clients then they expect. We
387 * need to guarantee that clients only have to keep around
388 * a single hw buffer length. */
389
390 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
391 break;
392
393 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
394 break;
395
396 n -= u->hwbuf_unused_frames;
397
398 for (;;) {
399 snd_pcm_sframes_t frames;
400 void *p;
401
402 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
403
404 if (u->memchunk.length <= 0)
405 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
406
407 pa_assert(u->memchunk.length > 0);
408
409 frames = u->memchunk.length / u->frame_size;
410
411 if (frames > n)
412 frames = n;
413
414 p = pa_memblock_acquire(u->memchunk.memblock);
415 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
416 pa_memblock_release(u->memchunk.memblock);
417
418 pa_assert(frames != 0);
419
420 if (PA_UNLIKELY(frames < 0)) {
421
422 if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)
423 continue;
424
425 return r;
426 }
427
428 u->memchunk.index += frames * u->frame_size;
429 u->memchunk.length -= frames * u->frame_size;
430
431 if (u->memchunk.length <= 0) {
432 pa_memblock_unref(u->memchunk.memblock);
433 pa_memchunk_reset(&u->memchunk);
434 }
435
436 work_done = 1;
437
438 u->frame_index += frames;
439 u->since_start += frames * u->frame_size;
440
441 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
442
443 if (frames >= n)
444 break;
445
446 n -= frames;
447 }
448 }
449
450 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
451 return work_done;
452 }
453
/* Feed the current playback position into the time smoother, so that
 * sink_get_latency() can interpolate the position between updates. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /* return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames actually played = frames handed to ALSA so far minus what
     * is still queued in the hw buffer */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    /* Pair system time with sound-card time for the smoother */
    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
496
497 static pa_usec_t sink_get_latency(struct userdata *u) {
498 pa_usec_t r = 0;
499 int64_t delay;
500 pa_usec_t now1, now2;
501
502 pa_assert(u);
503
504 now1 = pa_rtclock_usec();
505 now2 = pa_smoother_get(u->smoother, now1);
506
507 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
508
509 if (delay > 0)
510 r = (pa_usec_t) delay;
511
512 if (u->memchunk.memblock)
513 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
514
515 return r;
516 }
517
518 static int build_pollfd(struct userdata *u) {
519 pa_assert(u);
520 pa_assert(u->pcm_handle);
521
522 if (u->alsa_rtpoll_item)
523 pa_rtpoll_item_free(u->alsa_rtpoll_item);
524
525 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
526 return -1;
527
528 return 0;
529 }
530
/* Suspend the sink: pause the smoother, drain and close the PCM device
 * and remove its poll item. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze the time smoother while no audio is flowing */
    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend */
    snd_pcm_drain(u->pcm_handle);
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
551
552 static int update_sw_params(struct userdata *u) {
553 snd_pcm_uframes_t avail_min;
554 int err;
555
556 pa_assert(u);
557
558 /* Use the full buffer if noone asked us for anything specific */
559 u->hwbuf_unused_frames = 0;
560
561 if (u->use_tsched) {
562 pa_usec_t latency;
563
564 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
565 size_t b;
566
567 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
568
569 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
570
571 /* We need at least one sample in our buffer */
572
573 if (PA_UNLIKELY(b < u->frame_size))
574 b = u->frame_size;
575
576 u->hwbuf_unused_frames =
577 PA_LIKELY(b < u->hwbuf_size) ?
578 ((u->hwbuf_size - b) / u->frame_size) : 0;
579
580 fix_tsched_watermark(u);
581 }
582 }
583
584 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
585
586 /* We need at last one frame in the used part of the buffer */
587 avail_min = u->hwbuf_unused_frames + 1;
588
589 if (u->use_tsched) {
590 pa_usec_t sleep_usec, process_usec;
591
592 hw_sleep_time(u, &sleep_usec, &process_usec);
593 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
594 }
595
596 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
597
598 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
599 pa_log("Failed to set software parameters: %s", snd_strerror(err));
600 return err;
601 }
602
603 pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size);
604
605 return 0;
606 }
607
/* Resume from suspend: reopen the PCM device and verify that the exact
 * same configuration (access mode, sample spec, fragment settings) can
 * be restored. Returns 0 on success, -1 on failure (device left
 * closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop cached ALSA config; the device set may have changed */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request the same configuration we had before suspending */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* The driver may have negotiated something different -- if it did,
     * we cannot transparently resume */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Treat the stream as freshly started */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
675
/* IO-thread message handler: answers latency queries and drives the
 * suspend/resume transitions on sink state changes, then hands the
 * message on to the generic sink handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* A suspended device has no latency to report */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
729
/* ALSA mixer element callback: when the hardware volume/mute changes
 * behind our back, pull the new values into the sink. */
static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    /* Element went away -- nothing to refresh */
    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink);
        pa_sink_get_mute(u->sink);
    }

    return 0;
}
746
/* Read the per-channel hardware volume into s->volume. Prefers the dB
 * API; if a dB query fails the dB path is disabled for good and the raw
 * integer volume range is used instead. Returns 0 on success, -1 on
 * failure. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        if (u->hw_dB_supported) {

            /* ALSA reports dB in 1/100 dB units */
            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
                s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
                continue;
            }

            /* dB query failed -- fall back to raw volumes permanently */
            u->hw_dB_supported = FALSE;
        }

        if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
            goto fail;

        /* Scale the raw hw range linearly onto [0, PA_VOLUME_NORM] */
        s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
783
/* Write s->volume to the hardware, per channel. Prefers the dB API
 * (falling back permanently to raw volumes on failure) and reads the
 * value back afterwards so s->volume reflects what the hardware
 * actually granted. Returns 0 on success, -1 on failure. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;
        pa_volume_t vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        /* Hardware cannot amplify beyond 100% */
        vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);

        if (u->hw_dB_supported) {
            /* ALSA takes dB in 1/100 dB units */
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {

                /* Read back what was actually set */
                if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
                    s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

                continue;
            }

            /* dB write failed -- fall back to raw volumes permanently */
            u->hw_dB_supported = FALSE;

        }

        /* Scale [0, PA_VOLUME_NORM] linearly onto the raw hw range */
        alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

        if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
            goto fail;

        /* Read back what was actually set */
        if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
            s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
833
834 static int sink_get_mute_cb(pa_sink *s) {
835 struct userdata *u = s->userdata;
836 int err, sw;
837
838 pa_assert(u);
839 pa_assert(u->mixer_elem);
840
841 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
842 pa_log_error("Unable to get switch: %s", snd_strerror(err));
843 return -1;
844 }
845
846 s->muted = !sw;
847
848 return 0;
849 }
850
851 static int sink_set_mute_cb(pa_sink *s) {
852 struct userdata *u = s->userdata;
853 int err;
854
855 pa_assert(u);
856 pa_assert(u->mixer_elem);
857
858 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
859 pa_log_error("Unable to set switch: %s", snd_strerror(err));
860 return -1;
861 }
862
863 return 0;
864 }
865
/* Called when the requested sink latency changes: re-apply the sw
 * params and, if the usable buffer shrank, request a full rewind so
 * future rewinds are measured against the new maximum fill level. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    snd_pcm_sframes_t before;
    pa_assert(u);

    /* Nothing to do while suspended */
    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused_frames;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
    buffer than before. If so, we need to make sure that subsequent
    rewinds are relative to the new maximum fill level and not to the
    current fill level. Thus, let's do a full rewind once, to clear
    things up. */

    if (u->hwbuf_unused_frames > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, 0);
    }
}
888
/* Perform the rewind the core requested: limit the requested byte count
 * to what is actually rewindable in the hw buffer (keeping the
 * watermark worth of data queued), call snd_pcm_rewind() and inform the
 * core of how much was really rewound. Returns 0 on success, -1 on
 * ALSA error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
        return -1;
    }

    /* Bytes we must NOT rewind over: the empty part of the buffer plus
     * the watermark's worth of queued audio */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than asked for */
        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            /* Suppress the underrun warning on the next fill */
            u->after_rewind = TRUE;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

    return 0;
}
946
/* Main IO-thread loop: renders and writes audio whenever the sink is
 * opened, schedules wakeups (timer-based or poll-based), and handles
 * ALSA poll events and error recovery. Runs until shut down via the
 * thread message queue. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec;

            /* Service any pending rewind request first */
            if (u->sink->thread_info.rewind_nbytes > 0)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off playback */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* 0 means a shutdown request was processed */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                /* Recovered: restart playback bookkeeping */
                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1078
1079 int pa__init(pa_module*m) {
1080
1081 pa_modargs *ma = NULL;
1082 struct userdata *u = NULL;
1083 const char *dev_id;
1084 pa_sample_spec ss;
1085 pa_channel_map map;
1086 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1087 snd_pcm_uframes_t period_frames, tsched_frames;
1088 size_t frame_size;
1089 snd_pcm_info_t *pcm_info = NULL;
1090 int err;
1091 const char *name;
1092 char *name_buf = NULL;
1093 pa_bool_t namereg_fail;
1094 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, mixer_reset = TRUE;
1095 pa_usec_t usec;
1096 pa_sink_new_data data;
1097
1098 snd_pcm_info_alloca(&pcm_info);
1099
1100 pa_assert(m);
1101
1102 pa_alsa_redirect_errors_inc();
1103
1104 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1105 pa_log("Failed to parse module arguments");
1106 goto fail;
1107 }
1108
1109 ss = m->core->default_sample_spec;
1110 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1111 pa_log("Failed to parse sample specification and channel map");
1112 goto fail;
1113 }
1114
1115 frame_size = pa_frame_size(&ss);
1116
1117 nfrags = m->core->default_n_fragments;
1118 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1119 if (frag_size <= 0)
1120 frag_size = frame_size;
1121 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1122 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1123
1124 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1125 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1126 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1127 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1128 pa_log("Failed to parse buffer metrics");
1129 goto fail;
1130 }
1131
1132 hwbuf_size = frag_size * nfrags;
1133 period_frames = frag_size/frame_size;
1134 tsched_frames = tsched_size/frame_size;
1135
1136 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1137 pa_log("Failed to parse mmap argument.");
1138 goto fail;
1139 }
1140
1141 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1142 pa_log("Failed to parse timer_scheduling argument.");
1143 goto fail;
1144 }
1145
1146 if (use_tsched && !pa_rtclock_hrtimer()) {
1147 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1148 use_tsched = FALSE;
1149 }
1150
1151 if (pa_modargs_get_value_boolean(ma, "mixer_reset", &mixer_reset) < 0) {
1152 pa_log("Failed to parse mixer_reset argument.");
1153 goto fail;
1154 }
1155
1156 u = pa_xnew0(struct userdata, 1);
1157 u->core = m->core;
1158 u->module = m;
1159 m->userdata = u;
1160 u->use_mmap = use_mmap;
1161 u->use_tsched = use_tsched;
1162 u->first = TRUE;
1163 u->since_start = 0;
1164 u->after_rewind = FALSE;
1165 u->rtpoll = pa_rtpoll_new();
1166 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1167 u->alsa_rtpoll_item = NULL;
1168
1169 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1170 usec = pa_rtclock_usec();
1171 pa_smoother_set_time_offset(u->smoother, usec);
1172 pa_smoother_pause(u->smoother, usec);
1173
1174 snd_config_update_free_global();
1175
1176 b = use_mmap;
1177 d = use_tsched;
1178
1179 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1180
1181 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1182 dev_id,
1183 &u->device_name,
1184 &ss, &map,
1185 SND_PCM_STREAM_PLAYBACK,
1186 &nfrags, &period_frames, tsched_frames,
1187 &b, &d)))
1188
1189 goto fail;
1190
1191 } else {
1192
1193 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1194 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1195 &u->device_name,
1196 &ss, &map,
1197 SND_PCM_STREAM_PLAYBACK,
1198 &nfrags, &period_frames, tsched_frames,
1199 &b, &d)))
1200 goto fail;
1201
1202 }
1203
1204 pa_assert(u->device_name);
1205 pa_log_info("Successfully opened device %s.", u->device_name);
1206
1207 if (use_mmap && !b) {
1208 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1209 u->use_mmap = use_mmap = FALSE;
1210 }
1211
1212 if (use_tsched && (!b || !d)) {
1213 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1214 u->use_tsched = use_tsched = FALSE;
1215 }
1216
1217 if (u->use_mmap)
1218 pa_log_info("Successfully enabled mmap() mode.");
1219
1220 if (u->use_tsched)
1221 pa_log_info("Successfully enabled timer-based scheduling mode.");
1222
1223 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1224 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1225 goto fail;
1226 }
1227
1228 /* ALSA might tweak the sample spec, so recalculate the frame size */
1229 frame_size = pa_frame_size(&ss);
1230
1231 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1232 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1233 else {
1234 pa_bool_t found = FALSE;
1235
1236 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1237 found = TRUE;
1238 else {
1239 snd_pcm_info_t *info;
1240
1241 snd_pcm_info_alloca(&info);
1242
1243 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1244 char *md;
1245 int card;
1246
1247 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1248
1249 md = pa_sprintf_malloc("hw:%i", card);
1250
1251 if (strcmp(u->device_name, md))
1252 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1253 found = TRUE;
1254 pa_xfree(md);
1255 }
1256 }
1257 }
1258
1259 if (found)
1260 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1261 found = FALSE;
1262
1263 if (!found) {
1264 snd_mixer_close(u->mixer_handle);
1265 u->mixer_handle = NULL;
1266 }
1267 }
1268
1269 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1270 namereg_fail = TRUE;
1271 else {
1272 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1273 namereg_fail = FALSE;
1274 }
1275
1276 pa_sink_new_data_init(&data);
1277 data.driver = __FILE__;
1278 data.module = m;
1279 pa_sink_new_data_set_name(&data, name);
1280 data.namereg_fail = namereg_fail;
1281 pa_sink_new_data_set_sample_spec(&data, &ss);
1282 pa_sink_new_data_set_channel_map(&data, &map);
1283
1284 pa_alsa_init_proplist(data.proplist, pcm_info);
1285 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1286 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1287 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1288 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1289
1290 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1291 pa_sink_new_data_done(&data);
1292 pa_xfree(name_buf);
1293
1294 if (!u->sink) {
1295 pa_log("Failed to create sink object");
1296 goto fail;
1297 }
1298
1299 u->sink->parent.process_msg = sink_process_msg;
1300 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1301 u->sink->userdata = u;
1302
1303 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1304 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1305
1306 u->frame_size = frame_size;
1307 u->fragment_size = frag_size = period_frames * frame_size;
1308 u->nfragments = nfrags;
1309 u->hwbuf_size = u->fragment_size * nfrags;
1310 u->hwbuf_unused_frames = 0;
1311 u->tsched_watermark = tsched_watermark;
1312 u->frame_index = 0;
1313 u->hw_dB_supported = FALSE;
1314 u->hw_dB_min = u->hw_dB_max = 0;
1315 u->hw_volume_min = u->hw_volume_max = 0;
1316
1317 if (use_tsched)
1318 fix_tsched_watermark(u);
1319
1320 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1321 u->sink->thread_info.max_request = u->hwbuf_size;
1322
1323 pa_sink_set_latency_range(u->sink,
1324 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1325 pa_bytes_to_usec(u->hwbuf_size, &ss));
1326
1327 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1328 nfrags, (long unsigned) u->fragment_size,
1329 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1330
1331 if (use_tsched)
1332 pa_log_info("Time scheduling watermark is %0.2fms",
1333 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1334
1335 if (update_sw_params(u) < 0)
1336 goto fail;
1337
1338 pa_memchunk_reset(&u->memchunk);
1339
1340 if (u->mixer_handle) {
1341 pa_assert(u->mixer_elem);
1342
1343 if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
1344
1345 if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0 &&
1346 snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) >= 0) {
1347
1348 pa_bool_t suitable = TRUE;
1349
1350 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1351
1352 if (u->hw_volume_min > u->hw_volume_max) {
1353
1354 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u->hw_volume_min, u->hw_volume_max);
1355 suitable = FALSE;
1356
1357 } else if (u->hw_volume_max - u->hw_volume_min < 3) {
1358
1359 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1360 suitable = FALSE;
1361
1362 } else if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) >= 0) {
1363
1364 /* u->hw_dB_max = 0; u->hw_dB_min = -3000; Use this to make valgrind shut up */
1365
1366 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1367
1368 /* Let's see if this thing actually is useful for muting */
1369 if (u->hw_dB_min > -6000) {
1370 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u->hw_dB_min) / 100);
1371
1372 suitable = FALSE;
1373 } else if (u->hw_dB_max < 0) {
1374
1375 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_max) / 100);
1376 suitable = FALSE;
1377
1378 } else if (u->hw_dB_min >= u->hw_dB_max) {
1379
1380 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_min) / 100, ((double) u->hw_dB_max) / 100);
1381 suitable = FALSE;
1382
1383 } else {
1384
1385 if (u->hw_dB_max > 0) {
1386 /* dB > 0 means overamplification, and clipping, we don't want that here */
1387 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u->hw_dB_max) / 100);
1388 u->hw_dB_max = 0;
1389 }
1390
1391 u->hw_dB_supported = TRUE;
1392 }
1393 }
1394
1395 if (suitable) {
1396 u->sink->get_volume = sink_get_volume_cb;
1397 u->sink->set_volume = sink_set_volume_cb;
1398 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1399 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1400
1401 } else if (mixer_reset) {
1402 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1403 pa_alsa_0dB_playback(u->mixer_elem);
1404 } else
1405 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1406 }
1407
1408 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1409 u->sink->get_mute = sink_get_mute_cb;
1410 u->sink->set_mute = sink_set_mute_cb;
1411 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1412 }
1413
1414 u->mixer_fdl = pa_alsa_fdlist_new();
1415
1416 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1417 pa_log("Failed to initialize file descriptor monitoring");
1418 goto fail;
1419 }
1420
1421 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1422 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1423 } else
1424 u->mixer_fdl = NULL;
1425
1426 pa_alsa_dump(u->pcm_handle);
1427
1428 if (!(u->thread = pa_thread_new(thread_func, u))) {
1429 pa_log("Failed to create thread.");
1430 goto fail;
1431 }
1432
1433 /* Get initial mixer settings */
1434 if (data.volume_is_set) {
1435 if (u->sink->set_volume)
1436 u->sink->set_volume(u->sink);
1437 } else {
1438 if (u->sink->get_volume)
1439 u->sink->get_volume(u->sink);
1440 }
1441
1442 if (data.muted_is_set) {
1443 if (u->sink->set_mute)
1444 u->sink->set_mute(u->sink);
1445 } else {
1446 if (u->sink->get_mute)
1447 u->sink->get_mute(u->sink);
1448 }
1449
1450 pa_sink_put(u->sink);
1451
1452 pa_modargs_free(ma);
1453
1454 return 0;
1455
1456 fail:
1457
1458 if (ma)
1459 pa_modargs_free(ma);
1460
1461 pa__done(m);
1462
1463 return -1;
1464 }
1465
/*
 * Module teardown. Safe to call at any point of a failed pa__init():
 * every resource is guarded by a NULL check. Teardown order matters:
 * unlink the sink first, then stop the IO thread, and only then free
 * the structures the thread was using.
 */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    /* pa__init() bumps the error-redirect refcount before allocating
     * userdata, so drop it even when there is nothing else to free. */
    if (!(u = m->userdata)) {
        pa_alsa_redirect_errors_dec();
        return;
    }

    /* Detach the sink from the core so no new data is routed to it
     * while we tear down the IO thread. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to exit and wait for it; afterwards nobody
     * else touches pcm_handle, rtpoll or the message queues. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    /* Pending playback data that was never handed to the device. */
    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop (not drain) pending samples so closing doesn't block. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    /* Release ALSA's global config cache. */
    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}