/* pulseaudio: src/modules/module-alsa-sink.c
 * commit: make sure we call pa_sink_process_rewind() under all circumstances
 * if a rewind was requested */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/xmalloc.h>
32 #include <pulse/util.h>
33 #include <pulse/timeval.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/module.h>
37 #include <pulsecore/memchunk.h>
38 #include <pulsecore/sink.h>
39 #include <pulsecore/modargs.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/thread.h>
45 #include <pulsecore/core-error.h>
46 #include <pulsecore/thread-mq.h>
47 #include <pulsecore/rtpoll.h>
48 #include <pulsecore/rtclock.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include "alsa-util.h"
52 #include "module-alsa-sink-symdef.h"
53
/* Module metadata; the usage string documents every argument accepted,
 * and must stay in sync with valid_modargs below. */
PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark> "
        "mixer_reset=<reset hw volume and mute settings to sane defaults when falling back to software?>");
73
/* Argument names accepted by this module; must match PA_MODULE_USAGE
 * above. NULL-terminated for pa_modargs_new(). */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    "mixer_reset",
    NULL
};
91
#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* default hw buffer in timer-scheduling mode: 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* default refill watermark: 20ms */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)           /* lower bound on sleep time: 3ms */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)          /* lower bound on the wakeup watermark: 3ms */
97
/* Per-instance state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* Realtime IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* NULL while suspended */

    /* Hardware mixer state for volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* dB*100 range, when dB is usable */
    pa_bool_t hw_dB_supported;          /* cleared at runtime if a dB call fails */

    /* Buffer metrics, in bytes */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;               /* partially written chunk (non-mmap path) */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    /* first: device was (re)started and playback not yet triggered;
     * after_rewind: set right after a rewind to suppress the underrun
     * heuristics in check_left_to_play() for one iteration */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation */
    pa_smoother *smoother;
    int64_t frame_index;                /* frames written minus frames rewound */
    uint64_t since_start;               /* bytes written since stream start */

    /* Frames at the top of the hw buffer deliberately left unused in
     * order to honor a requested latency smaller than the full buffer */
    snd_pcm_sframes_t hwbuf_unused_frames;
};
136
137 static void fix_tsched_watermark(struct userdata *u) {
138 size_t max_use;
139 size_t min_sleep, min_wakeup;
140 pa_assert(u);
141
142 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
143
144 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
145 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
146
147 if (min_sleep > max_use/2)
148 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
149 if (min_sleep < u->frame_size)
150 min_sleep = u->frame_size;
151
152 if (min_wakeup > max_use/2)
153 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
154 if (min_wakeup < u->frame_size)
155 min_wakeup = u->frame_size;
156
157 if (u->tsched_watermark > max_use-min_sleep)
158 u->tsched_watermark = max_use-min_sleep;
159
160 if (u->tsched_watermark < min_wakeup)
161 u->tsched_watermark = min_wakeup;
162 }
163
164 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
165 pa_usec_t usec, wm;
166
167 pa_assert(sleep_usec);
168 pa_assert(process_usec);
169
170 pa_assert(u);
171
172 usec = pa_sink_get_requested_latency_within_thread(u->sink);
173
174 if (usec == (pa_usec_t) -1)
175 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
176
177 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
178
179 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
180
181 if (usec >= wm) {
182 *sleep_usec = usec - wm;
183 *process_usec = wm;
184 } else
185 *process_usec = *sleep_usec = usec / 2;
186
187 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
188 }
189
190 static int try_recover(struct userdata *u, const char *call, int err) {
191 pa_assert(u);
192 pa_assert(call);
193 pa_assert(err < 0);
194
195 pa_log_debug("%s: %s", call, snd_strerror(err));
196
197 pa_assert(err != -EAGAIN);
198
199 if (err == -EPIPE)
200 pa_log_debug("%s: Buffer underrun!", call);
201
202 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
203 u->first = TRUE;
204 u->since_start = 0;
205 return 0;
206 }
207
208 pa_log("%s: %s", call, snd_strerror(err));
209 return -1;
210 }
211
212 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
213 size_t left_to_play;
214
215 if (n*u->frame_size < u->hwbuf_size)
216 left_to_play = u->hwbuf_size - (n*u->frame_size);
217 else
218 left_to_play = 0;
219
220 if (left_to_play > 0) {
221 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
222 } else if (!u->first && !u->after_rewind) {
223 pa_log_info("Underrun!");
224
225 if (u->use_tsched) {
226 size_t old_watermark = u->tsched_watermark;
227
228 u->tsched_watermark *= 2;
229 fix_tsched_watermark(u);
230
231 if (old_watermark != u->tsched_watermark)
232 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
233 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
234 }
235 }
236
237 return left_to_play;
238 }
239
240 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
241 int work_done = 0;
242 pa_usec_t max_sleep_usec, process_usec;
243 size_t left_to_play;
244
245 pa_assert(u);
246 pa_sink_assert_ref(u->sink);
247
248 if (u->use_tsched)
249 hw_sleep_time(u, &max_sleep_usec, &process_usec);
250
251 for (;;) {
252 snd_pcm_sframes_t n;
253 int r;
254
255 snd_pcm_hwsync(u->pcm_handle);
256
257 /* First we determine how many samples are missing to fill the
258 * buffer up to 100% */
259
260 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
261
262 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
263 continue;
264
265 return r;
266 }
267
268 left_to_play = check_left_to_play(u, n);
269
270 if (u->use_tsched)
271
272 /* We won't fill up the playback buffer before at least
273 * half the sleep time is over because otherwise we might
274 * ask for more data from the clients then they expect. We
275 * need to guarantee that clients only have to keep around
276 * a single hw buffer length. */
277
278 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
279 break;
280
281 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
282 break;
283
284 n -= u->hwbuf_unused_frames;
285
286 /* pa_log_debug("Filling up"); */
287
288 for (;;) {
289 pa_memchunk chunk;
290 void *p;
291 int err;
292 const snd_pcm_channel_area_t *areas;
293 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
294
295 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
296
297 if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {
298
299 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
300 continue;
301
302 return r;
303 }
304
305 /* Make sure that if these memblocks need to be copied they will fit into one slot */
306 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
307 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
308
309 /* Check these are multiples of 8 bit */
310 pa_assert((areas[0].first & 7) == 0);
311 pa_assert((areas[0].step & 7)== 0);
312
313 /* We assume a single interleaved memory buffer */
314 pa_assert((areas[0].first >> 3) == 0);
315 pa_assert((areas[0].step >> 3) == u->frame_size);
316
317 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
318
319 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
320 chunk.length = pa_memblock_get_length(chunk.memblock);
321 chunk.index = 0;
322
323 pa_sink_render_into_full(u->sink, &chunk);
324
325 /* FIXME: Maybe we can do something to keep this memory block
326 * a little bit longer around? */
327 pa_memblock_unref_fixed(chunk.memblock);
328
329 if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
330
331 if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
332 continue;
333
334 return r;
335 }
336
337 work_done = 1;
338
339 u->frame_index += frames;
340 u->since_start += frames * u->frame_size;
341
342 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
343
344 if (frames >= (snd_pcm_uframes_t) n)
345 break;
346
347 n -= frames;
348 }
349 }
350
351 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
352 return work_done;
353 }
354
355 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
356 int work_done = 0;
357 pa_usec_t max_sleep_usec, process_usec;
358 size_t left_to_play;
359
360 pa_assert(u);
361 pa_sink_assert_ref(u->sink);
362
363 if (u->use_tsched)
364 hw_sleep_time(u, &max_sleep_usec, &process_usec);
365
366 for (;;) {
367 snd_pcm_sframes_t n;
368 int r;
369
370 snd_pcm_hwsync(u->pcm_handle);
371
372 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
373
374 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
375 continue;
376
377 return r;
378 }
379
380 left_to_play = check_left_to_play(u, n);
381
382 if (u->use_tsched)
383
384 /* We won't fill up the playback buffer before at least
385 * half the sleep time is over because otherwise we might
386 * ask for more data from the clients then they expect. We
387 * need to guarantee that clients only have to keep around
388 * a single hw buffer length. */
389
390 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
391 break;
392
393 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
394 break;
395
396 n -= u->hwbuf_unused_frames;
397
398 for (;;) {
399 snd_pcm_sframes_t frames;
400 void *p;
401
402 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
403
404 if (u->memchunk.length <= 0)
405 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
406
407 pa_assert(u->memchunk.length > 0);
408
409 frames = u->memchunk.length / u->frame_size;
410
411 if (frames > n)
412 frames = n;
413
414 p = pa_memblock_acquire(u->memchunk.memblock);
415 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
416 pa_memblock_release(u->memchunk.memblock);
417
418 pa_assert(frames != 0);
419
420 if (PA_UNLIKELY(frames < 0)) {
421
422 if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)
423 continue;
424
425 return r;
426 }
427
428 u->memchunk.index += frames * u->frame_size;
429 u->memchunk.length -= frames * u->frame_size;
430
431 if (u->memchunk.length <= 0) {
432 pa_memblock_unref(u->memchunk.memblock);
433 pa_memchunk_reset(&u->memchunk);
434 }
435
436 work_done = 1;
437
438 u->frame_index += frames;
439 u->since_start += frames * u->frame_size;
440
441 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
442
443 if (frames >= n)
444 break;
445
446 n -= frames;
447 }
448 }
449
450 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
451 return work_done;
452 }
453
454 static void update_smoother(struct userdata *u) {
455 snd_pcm_sframes_t delay = 0;
456 int64_t frames;
457 int err;
458 pa_usec_t now1, now2;
459 /* struct timeval timestamp; */
460 snd_pcm_status_t *status;
461
462 snd_pcm_status_alloca(&status);
463
464 pa_assert(u);
465 pa_assert(u->pcm_handle);
466
467 /* Let's update the time smoother */
468
469 snd_pcm_hwsync(u->pcm_handle);
470 snd_pcm_avail_update(u->pcm_handle);
471
472 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
473 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
474 /* return; */
475 /* } */
476
477 /* delay = snd_pcm_status_get_delay(status); */
478
479 if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
480 pa_log("Failed to query DSP status data: %s", snd_strerror(err));
481 return;
482 }
483
484 frames = u->frame_index - delay;
485
486 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
487
488 /* snd_pcm_status_get_tstamp(status, &timestamp); */
489 /* pa_rtclock_from_wallclock(&timestamp); */
490 /* now1 = pa_timeval_load(&timestamp); */
491
492 now1 = pa_rtclock_usec();
493 now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
494 pa_smoother_put(u->smoother, now1, now2);
495 }
496
497 static pa_usec_t sink_get_latency(struct userdata *u) {
498 pa_usec_t r = 0;
499 int64_t delay;
500 pa_usec_t now1, now2;
501
502 pa_assert(u);
503
504 now1 = pa_rtclock_usec();
505 now2 = pa_smoother_get(u->smoother, now1);
506
507 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
508
509 if (delay > 0)
510 r = (pa_usec_t) delay;
511
512 if (u->memchunk.memblock)
513 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
514
515 return r;
516 }
517
518 static int build_pollfd(struct userdata *u) {
519 pa_assert(u);
520 pa_assert(u->pcm_handle);
521
522 if (u->alsa_rtpoll_item)
523 pa_rtpoll_item_free(u->alsa_rtpoll_item);
524
525 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
526 return -1;
527
528 return 0;
529 }
530
531 static int suspend(struct userdata *u) {
532 pa_assert(u);
533 pa_assert(u->pcm_handle);
534
535 pa_smoother_pause(u->smoother, pa_rtclock_usec());
536
537 /* Let's suspend */
538 snd_pcm_drain(u->pcm_handle);
539 snd_pcm_close(u->pcm_handle);
540 u->pcm_handle = NULL;
541
542 if (u->alsa_rtpoll_item) {
543 pa_rtpoll_item_free(u->alsa_rtpoll_item);
544 u->alsa_rtpoll_item = NULL;
545 }
546
547 pa_log_info("Device suspended...");
548
549 return 0;
550 }
551
552 static int update_sw_params(struct userdata *u) {
553 snd_pcm_uframes_t avail_min;
554 int err;
555
556 pa_assert(u);
557
558 /* Use the full buffer if noone asked us for anything specific */
559 u->hwbuf_unused_frames = 0;
560
561 if (u->use_tsched) {
562 pa_usec_t latency;
563
564 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
565 size_t b;
566
567 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
568
569 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
570
571 /* We need at least one sample in our buffer */
572
573 if (PA_UNLIKELY(b < u->frame_size))
574 b = u->frame_size;
575
576 u->hwbuf_unused_frames =
577 PA_LIKELY(b < u->hwbuf_size) ?
578 ((u->hwbuf_size - b) / u->frame_size) : 0;
579
580 fix_tsched_watermark(u);
581 }
582 }
583
584 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
585
586 /* We need at last one frame in the used part of the buffer */
587 avail_min = u->hwbuf_unused_frames + 1;
588
589 if (u->use_tsched) {
590 pa_usec_t sleep_usec, process_usec;
591
592 hw_sleep_time(u, &sleep_usec, &process_usec);
593 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
594 }
595
596 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
597
598 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
599 pa_log("Failed to set software parameters: %s", snd_strerror(err));
600 return err;
601 }
602
603 pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size);
604
605 return 0;
606 }
607
/* Reopen and reconfigure the ALSA device after a suspend. The new
 * configuration must match the old one exactly (access mode, sample
 * spec, fragment settings); otherwise we fail and leave the sink
 * suspended. Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop ALSA's cached global config so device state is re-read */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Ask for exactly the settings we had before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may have negotiated something different;
     * verify we got back what we asked for */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Start from scratch: the IO thread will restart playback */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
675
/* Message handler running in the IO thread: answers latency queries and
 * performs state transitions, then delegates to the generic sink
 * handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no device: report zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: register poll fds */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
729
730 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
731 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
732
733 pa_assert(u);
734 pa_assert(u->mixer_handle);
735
736 if (mask == SND_CTL_EVENT_MASK_REMOVE)
737 return 0;
738
739 if (mask & SND_CTL_EVENT_MASK_VALUE) {
740 pa_sink_get_volume(u->sink);
741 pa_sink_get_mute(u->sink);
742 }
743
744 return 0;
745 }
746
747 static int sink_get_volume_cb(pa_sink *s) {
748 struct userdata *u = s->userdata;
749 int err;
750 int i;
751
752 pa_assert(u);
753 pa_assert(u->mixer_elem);
754
755 for (i = 0; i < s->sample_spec.channels; i++) {
756 long alsa_vol;
757
758 pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));
759
760 if (u->hw_dB_supported) {
761
762 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) >= 0) {
763 s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);
764 continue;
765 }
766
767 u->hw_dB_supported = FALSE;
768 }
769
770 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
771 goto fail;
772
773 s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
774 }
775
776 return 0;
777
778 fail:
779 pa_log_error("Unable to read volume: %s", snd_strerror(err));
780
781 return -1;
782 }
783
/* Push s->volume into the hardware mixer, channel by channel. Prefers
 * the dB interface when available; a failing dB call permanently
 * disables the dB path and falls through to raw volume steps. After
 * setting, the value is read back so s->volume reflects what the
 * hardware actually granted. Returns 0 on success, -1 on failure. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    int i;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    for (i = 0; i < s->sample_spec.channels; i++) {
        long alsa_vol;
        pa_volume_t vol;

        pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));

        /* The hardware only attenuates, so clamp to 100% */
        vol = PA_MIN(s->volume.values[i], PA_VOLUME_NORM);

        if (u->hw_dB_supported) {
            /* ALSA expects dB * 100 */
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, -1)) >= 0) {

                /* Read back what the hardware actually applied */
                if (snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
                    s->volume.values[i] = pa_sw_volume_from_dB(alsa_vol / 100.0);

                continue;
            }

            /* dB failed once: don't try it again for this element */
            u->hw_dB_supported = FALSE;

        }

        /* Map [0, PA_VOLUME_NORM] linearly onto the raw mixer range */
        alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

        if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
            goto fail;

        if (snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol) >= 0)
            s->volume.values[i] = (pa_volume_t) roundf(((float) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
    }

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
833
834 static int sink_get_mute_cb(pa_sink *s) {
835 struct userdata *u = s->userdata;
836 int err, sw;
837
838 pa_assert(u);
839 pa_assert(u->mixer_elem);
840
841 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
842 pa_log_error("Unable to get switch: %s", snd_strerror(err));
843 return -1;
844 }
845
846 s->muted = !sw;
847
848 return 0;
849 }
850
851 static int sink_set_mute_cb(pa_sink *s) {
852 struct userdata *u = s->userdata;
853 int err;
854
855 pa_assert(u);
856 pa_assert(u->mixer_elem);
857
858 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
859 pa_log_error("Unable to set switch: %s", snd_strerror(err));
860 return -1;
861 }
862
863 return 0;
864 }
865
866 static void sink_update_requested_latency_cb(pa_sink *s) {
867 struct userdata *u = s->userdata;
868 snd_pcm_sframes_t before;
869 pa_assert(u);
870
871 if (!u->pcm_handle)
872 return;
873
874 before = u->hwbuf_unused_frames;
875 update_sw_params(u);
876
877 /* Let's check whether we now use only a smaller part of the
878 buffer then before. If so, we need to make sure that subsequent
879 rewinds are relative to the new maxium fill level and not to the
880 current fill level. Thus, let's do a full rewind once, to clear
881 things up. */
882
883 if (u->hwbuf_unused_frames > before) {
884 pa_log_debug("Requesting rewind due to latency change.");
885 pa_sink_request_rewind(s, (size_t) -1);
886 }
887 }
888
/* Execute a rewind requested on the sink: take back as much already
 * written data from the ALSA buffer as safely possible so freshly
 * rendered audio can replace it.
 *
 * Invariant: pa_sink_process_rewind() is called on every path that
 * returns 0 -- either with the number of bytes actually rewound or with
 * 0 -- so a requested rewind is always answered.
 *
 * Returns 0 on success, -1 on an ALSA failure. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
        return -1;
    }

    /* Bytes we must not touch: the still-free part of the buffer plus a
     * watermark of safety in front of the playback pointer */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may rewind less than we asked for */
        rewind_nbytes = out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            /* Keep our write position in sync with the device */
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            /* Suppress underrun detection for the next iteration */
            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    /* Nothing useful was rewound -- still answer the request */
    pa_sink_process_rewind(u->sink, 0);

    return 0;

}
955
/* Entry point of the realtime IO thread: process rewinds, render audio
 * into the device, program the wakeup timer (in tsched mode), sleep on
 * the rtpoll and react to ALSA poll events until told to shut down. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec;

            /* A pending rewind must be handled before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            if (work_done) {

                /* First write after device (re)start: kick off playback */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* ret == 0 means we were asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* On an error condition try to recover and restart playback */
            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1087
1088 int pa__init(pa_module*m) {
1089
1090 pa_modargs *ma = NULL;
1091 struct userdata *u = NULL;
1092 const char *dev_id;
1093 pa_sample_spec ss;
1094 pa_channel_map map;
1095 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1096 snd_pcm_uframes_t period_frames, tsched_frames;
1097 size_t frame_size;
1098 snd_pcm_info_t *pcm_info = NULL;
1099 int err;
1100 const char *name;
1101 char *name_buf = NULL;
1102 pa_bool_t namereg_fail;
1103 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, mixer_reset = TRUE;
1104 pa_usec_t usec;
1105 pa_sink_new_data data;
1106
1107 snd_pcm_info_alloca(&pcm_info);
1108
1109 pa_assert(m);
1110
1111 pa_alsa_redirect_errors_inc();
1112
1113 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1114 pa_log("Failed to parse module arguments");
1115 goto fail;
1116 }
1117
1118 ss = m->core->default_sample_spec;
1119 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1120 pa_log("Failed to parse sample specification and channel map");
1121 goto fail;
1122 }
1123
1124 frame_size = pa_frame_size(&ss);
1125
1126 nfrags = m->core->default_n_fragments;
1127 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1128 if (frag_size <= 0)
1129 frag_size = frame_size;
1130 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1131 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1132
1133 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1134 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1135 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1136 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1137 pa_log("Failed to parse buffer metrics");
1138 goto fail;
1139 }
1140
1141 hwbuf_size = frag_size * nfrags;
1142 period_frames = frag_size/frame_size;
1143 tsched_frames = tsched_size/frame_size;
1144
1145 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1146 pa_log("Failed to parse mmap argument.");
1147 goto fail;
1148 }
1149
1150 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1151 pa_log("Failed to parse timer_scheduling argument.");
1152 goto fail;
1153 }
1154
1155 if (use_tsched && !pa_rtclock_hrtimer()) {
1156 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1157 use_tsched = FALSE;
1158 }
1159
1160 if (pa_modargs_get_value_boolean(ma, "mixer_reset", &mixer_reset) < 0) {
1161 pa_log("Failed to parse mixer_reset argument.");
1162 goto fail;
1163 }
1164
1165 u = pa_xnew0(struct userdata, 1);
1166 u->core = m->core;
1167 u->module = m;
1168 m->userdata = u;
1169 u->use_mmap = use_mmap;
1170 u->use_tsched = use_tsched;
1171 u->first = TRUE;
1172 u->since_start = 0;
1173 u->after_rewind = FALSE;
1174 u->rtpoll = pa_rtpoll_new();
1175 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1176 u->alsa_rtpoll_item = NULL;
1177
1178 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1179 usec = pa_rtclock_usec();
1180 pa_smoother_set_time_offset(u->smoother, usec);
1181 pa_smoother_pause(u->smoother, usec);
1182
1183 snd_config_update_free_global();
1184
1185 b = use_mmap;
1186 d = use_tsched;
1187
1188 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1189
1190 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1191 dev_id,
1192 &u->device_name,
1193 &ss, &map,
1194 SND_PCM_STREAM_PLAYBACK,
1195 &nfrags, &period_frames, tsched_frames,
1196 &b, &d)))
1197
1198 goto fail;
1199
1200 } else {
1201
1202 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1203 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1204 &u->device_name,
1205 &ss, &map,
1206 SND_PCM_STREAM_PLAYBACK,
1207 &nfrags, &period_frames, tsched_frames,
1208 &b, &d)))
1209 goto fail;
1210
1211 }
1212
1213 pa_assert(u->device_name);
1214 pa_log_info("Successfully opened device %s.", u->device_name);
1215
1216 if (use_mmap && !b) {
1217 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1218 u->use_mmap = use_mmap = FALSE;
1219 }
1220
1221 if (use_tsched && (!b || !d)) {
1222 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1223 u->use_tsched = use_tsched = FALSE;
1224 }
1225
1226 if (u->use_mmap)
1227 pa_log_info("Successfully enabled mmap() mode.");
1228
1229 if (u->use_tsched)
1230 pa_log_info("Successfully enabled timer-based scheduling mode.");
1231
1232 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1233 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1234 goto fail;
1235 }
1236
1237 /* ALSA might tweak the sample spec, so recalculate the frame size */
1238 frame_size = pa_frame_size(&ss);
1239
1240 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1241 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1242 else {
1243 pa_bool_t found = FALSE;
1244
1245 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1246 found = TRUE;
1247 else {
1248 snd_pcm_info_t *info;
1249
1250 snd_pcm_info_alloca(&info);
1251
1252 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1253 char *md;
1254 int card;
1255
1256 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1257
1258 md = pa_sprintf_malloc("hw:%i", card);
1259
1260 if (strcmp(u->device_name, md))
1261 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1262 found = TRUE;
1263 pa_xfree(md);
1264 }
1265 }
1266 }
1267
1268 if (found)
1269 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1270 found = FALSE;
1271
1272 if (!found) {
1273 snd_mixer_close(u->mixer_handle);
1274 u->mixer_handle = NULL;
1275 }
1276 }
1277
1278 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1279 namereg_fail = TRUE;
1280 else {
1281 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1282 namereg_fail = FALSE;
1283 }
1284
1285 pa_sink_new_data_init(&data);
1286 data.driver = __FILE__;
1287 data.module = m;
1288 pa_sink_new_data_set_name(&data, name);
1289 data.namereg_fail = namereg_fail;
1290 pa_sink_new_data_set_sample_spec(&data, &ss);
1291 pa_sink_new_data_set_channel_map(&data, &map);
1292
1293 pa_alsa_init_proplist(data.proplist, pcm_info);
1294 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1295 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1296 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1297 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1298
1299 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1300 pa_sink_new_data_done(&data);
1301 pa_xfree(name_buf);
1302
1303 if (!u->sink) {
1304 pa_log("Failed to create sink object");
1305 goto fail;
1306 }
1307
1308 u->sink->parent.process_msg = sink_process_msg;
1309 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1310 u->sink->userdata = u;
1311
1312 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1313 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1314
1315 u->frame_size = frame_size;
1316 u->fragment_size = frag_size = period_frames * frame_size;
1317 u->nfragments = nfrags;
1318 u->hwbuf_size = u->fragment_size * nfrags;
1319 u->hwbuf_unused_frames = 0;
1320 u->tsched_watermark = tsched_watermark;
1321 u->frame_index = 0;
1322 u->hw_dB_supported = FALSE;
1323 u->hw_dB_min = u->hw_dB_max = 0;
1324 u->hw_volume_min = u->hw_volume_max = 0;
1325
1326 if (use_tsched)
1327 fix_tsched_watermark(u);
1328
1329 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1330 u->sink->thread_info.max_request = u->hwbuf_size;
1331
1332 pa_sink_set_latency_range(u->sink,
1333 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1334 pa_bytes_to_usec(u->hwbuf_size, &ss));
1335
1336 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1337 nfrags, (long unsigned) u->fragment_size,
1338 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1339
1340 if (use_tsched)
1341 pa_log_info("Time scheduling watermark is %0.2fms",
1342 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1343
1344 if (update_sw_params(u) < 0)
1345 goto fail;
1346
1347 pa_memchunk_reset(&u->memchunk);
1348
1349 if (u->mixer_handle) {
1350 pa_assert(u->mixer_elem);
1351
1352 if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
1353
1354 if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0 &&
1355 snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) >= 0) {
1356
1357 pa_bool_t suitable = TRUE;
1358
1359 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1360
1361 if (u->hw_volume_min > u->hw_volume_max) {
1362
1363 pa_log_info("Minimal volume %li larger than maximum volume %li. Strange stuff Falling back to software volume control.", u->hw_volume_min, u->hw_volume_max);
1364 suitable = FALSE;
1365
1366 } else if (u->hw_volume_max - u->hw_volume_min < 3) {
1367
1368 pa_log_info("Device has less than 4 volume levels. Falling back to software volume control.");
1369 suitable = FALSE;
1370
1371 } else if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) >= 0) {
1372
1373 /* u->hw_dB_max = 0; u->hw_dB_min = -3000; Use this to make valgrind shut up */
1374
1375 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1376
1377 /* Let's see if this thing actually is useful for muting */
1378 if (u->hw_dB_min > -6000) {
1379 pa_log_info("Device cannot attenuate for more than -60 dB (only %0.2f dB supported), falling back to software volume control.", ((double) u->hw_dB_min) / 100);
1380
1381 suitable = FALSE;
1382 } else if (u->hw_dB_max < 0) {
1383
1384 pa_log_info("Device is still attenuated at maximum volume setting (%0.2f dB is maximum). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_max) / 100);
1385 suitable = FALSE;
1386
1387 } else if (u->hw_dB_min >= u->hw_dB_max) {
1388
1389 pa_log_info("Minimal dB (%0.2f) larger or equal to maximum dB (%0.2f). Strange stuff. Falling back to software volume control.", ((double) u->hw_dB_min) / 100, ((double) u->hw_dB_max) / 100);
1390 suitable = FALSE;
1391
1392 } else {
1393
1394 if (u->hw_dB_max > 0) {
1395 /* dB > 0 means overamplification, and clipping, we don't want that here */
1396 pa_log_info("Device can do overamplification for %0.2f dB. Limiting to 0 db", ((double) u->hw_dB_max) / 100);
1397 u->hw_dB_max = 0;
1398 }
1399
1400 u->hw_dB_supported = TRUE;
1401 }
1402 }
1403
1404 if (suitable) {
1405 u->sink->get_volume = sink_get_volume_cb;
1406 u->sink->set_volume = sink_set_volume_cb;
1407 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1408 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1409
1410 } else if (mixer_reset) {
1411 pa_log_info("Using software volume control. Trying to reset sound card to 0 dB.");
1412 pa_alsa_0dB_playback(u->mixer_elem);
1413 } else
1414 pa_log_info("Using software volume control. Leaving hw mixer controls untouched.");
1415 }
1416
1417 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1418 u->sink->get_mute = sink_get_mute_cb;
1419 u->sink->set_mute = sink_set_mute_cb;
1420 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1421 }
1422
1423 u->mixer_fdl = pa_alsa_fdlist_new();
1424
1425 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1426 pa_log("Failed to initialize file descriptor monitoring");
1427 goto fail;
1428 }
1429
1430 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1431 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1432 } else
1433 u->mixer_fdl = NULL;
1434
1435 pa_alsa_dump(u->pcm_handle);
1436
1437 if (!(u->thread = pa_thread_new(thread_func, u))) {
1438 pa_log("Failed to create thread.");
1439 goto fail;
1440 }
1441
1442 /* Get initial mixer settings */
1443 if (data.volume_is_set) {
1444 if (u->sink->set_volume)
1445 u->sink->set_volume(u->sink);
1446 } else {
1447 if (u->sink->get_volume)
1448 u->sink->get_volume(u->sink);
1449 }
1450
1451 if (data.muted_is_set) {
1452 if (u->sink->set_mute)
1453 u->sink->set_mute(u->sink);
1454 } else {
1455 if (u->sink->get_mute)
1456 u->sink->get_mute(u->sink);
1457 }
1458
1459 pa_sink_put(u->sink);
1460
1461 pa_modargs_free(ma);
1462
1463 return 0;
1464
1465 fail:
1466
1467 if (ma)
1468 pa_modargs_free(ma);
1469
1470 pa__done(m);
1471
1472 return -1;
1473 }
1474
/* Module teardown. Safe to call with partially initialized state (the
 * pa__init() failure path calls it), hence the NULL checks throughout.
 * The ordering below is deliberate: unlink the sink first so no new
 * data arrives, then stop the I/O thread, then free everything the
 * thread was using. */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    /* Nothing was set up yet; just balance the error-redirect refcount
     * taken in pa__init(). */
    if (!(u = m->userdata)) {
        pa_alsa_redirect_errors_dec();
        return;
    }

    /* Detach the sink from the core before shutting down the thread. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously ask the I/O thread to exit and reap it. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    /* Drop our reference; the sink object may outlive us if others hold refs. */
    if (u->sink)
        pa_sink_unref(u->sink);

    /* Pending render data, if any. */
    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    /* Free the poll item before the rtpoll it belongs to. */
    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Discard queued samples, then close the PCM device. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}