]> code.delx.au - pulseaudio/blob - src/modules/module-alsa-sink.c
extend hardware dB scale in software to full range if necessary, instead of reverting...
[pulseaudio] / src / modules / module-alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/xmalloc.h>
32 #include <pulse/util.h>
33 #include <pulse/timeval.h>
34
35 #include <pulsecore/core.h>
36 #include <pulsecore/module.h>
37 #include <pulsecore/memchunk.h>
38 #include <pulsecore/sink.h>
39 #include <pulsecore/modargs.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/thread.h>
45 #include <pulsecore/core-error.h>
46 #include <pulsecore/thread-mq.h>
47 #include <pulsecore/rtpoll.h>
48 #include <pulsecore/rtclock.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include "alsa-util.h"
52 #include "module-alsa-sink-symdef.h"
53
/* Module metadata and the argument summary shown by introspection
 * tools; the argument list must stay in sync with valid_modargs. */
PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark>");
72
/* Module arguments accepted by pa__init(); NULL-terminated. */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    NULL
};
89
/* Fallback ALSA device if neither "device" nor "device_id" is given */
#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* 2s -- hw buffer target in timer-based scheduling mode */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* 20ms -- initial wakeup watermark */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)           /* 3ms -- lower bound on sleep margin */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)          /* 3ms -- lower bound on wakeup margin */
95
/* Per-module state, shared between the main thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;        /* IO thread running thread_func() */
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;    /* NULL while the sink is suspended */

    /* Mixer state for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* dB*100 range; valid only if hw_dB_supported */
    pa_bool_t hw_dB_supported;
    pa_bool_t mixer_seperate_channels;  /* [sic] TRUE if the mixer is controlled per channel */
    pa_cvolume hardware_volume;         /* last volume read from / written to the hardware */

    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;  /* all in bytes */
    unsigned nfragments;
    pa_memchunk memchunk;     /* partially consumed render chunk (non-mmap write path) */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    /* Suppress underrun handling right after stream start / a rewind */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* PA channel index -> ALSA mixer channel */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;    /* maps sound-card time to system time for latency interpolation */
    int64_t frame_index;      /* frames handed to ALSA since start (decremented on rewind) */
    uint64_t since_start;     /* bytes written since the last (re)start */

    snd_pcm_sframes_t hwbuf_unused_frames;  /* tail of the hw buffer we deliberately keep empty */
};
136
137 static void fix_tsched_watermark(struct userdata *u) {
138 size_t max_use;
139 size_t min_sleep, min_wakeup;
140 pa_assert(u);
141
142 max_use = u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size;
143
144 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
145 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
146
147 if (min_sleep > max_use/2)
148 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
149 if (min_sleep < u->frame_size)
150 min_sleep = u->frame_size;
151
152 if (min_wakeup > max_use/2)
153 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
154 if (min_wakeup < u->frame_size)
155 min_wakeup = u->frame_size;
156
157 if (u->tsched_watermark > max_use-min_sleep)
158 u->tsched_watermark = max_use-min_sleep;
159
160 if (u->tsched_watermark < min_wakeup)
161 u->tsched_watermark = min_wakeup;
162 }
163
164 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
165 pa_usec_t usec, wm;
166
167 pa_assert(sleep_usec);
168 pa_assert(process_usec);
169
170 pa_assert(u);
171
172 usec = pa_sink_get_requested_latency_within_thread(u->sink);
173
174 if (usec == (pa_usec_t) -1)
175 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
176
177 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
178
179 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
180
181 if (usec >= wm) {
182 *sleep_usec = usec - wm;
183 *process_usec = wm;
184 } else
185 *process_usec = *sleep_usec = usec / 2;
186
187 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
188 }
189
190 static int try_recover(struct userdata *u, const char *call, int err) {
191 pa_assert(u);
192 pa_assert(call);
193 pa_assert(err < 0);
194
195 pa_log_debug("%s: %s", call, snd_strerror(err));
196
197 pa_assert(err != -EAGAIN);
198
199 if (err == -EPIPE)
200 pa_log_debug("%s: Buffer underrun!", call);
201
202 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
203 u->first = TRUE;
204 u->since_start = 0;
205 return 0;
206 }
207
208 pa_log("%s: %s", call, snd_strerror(err));
209 return -1;
210 }
211
212 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
213 size_t left_to_play;
214
215 if (n*u->frame_size < u->hwbuf_size)
216 left_to_play = u->hwbuf_size - (n*u->frame_size);
217 else
218 left_to_play = 0;
219
220 if (left_to_play > 0) {
221 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
222 } else if (!u->first && !u->after_rewind) {
223 pa_log_info("Underrun!");
224
225 if (u->use_tsched) {
226 size_t old_watermark = u->tsched_watermark;
227
228 u->tsched_watermark *= 2;
229 fix_tsched_watermark(u);
230
231 if (old_watermark != u->tsched_watermark)
232 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
233 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
234 }
235 }
236
237 return left_to_play;
238 }
239
/* Fill the hardware buffer via ALSA's mmap interface by rendering
 * directly into the DMA area. Returns 1 if any data was written, 0 if
 * not, negative on unrecoverable error. On success *sleep_usec is set
 * to roughly how long we may sleep before the next fill is due. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients then they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        /* Buffer already full up to the deliberately unused tail? */
        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        /* pa_log_debug("Filling up"); */

        /* Write n frames, possibly in several mmap_begin/commit cycles */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA area in a fixed memblock so the sink can
             * render straight into it */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", err)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= frames;
        }
    }

    /* NOTE(review): if left_to_play converts to less than process_usec
     * this unsigned subtraction would wrap -- presumably prevented by
     * the fill logic above; worth confirming. */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
354
355 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
356 int work_done = 0;
357 pa_usec_t max_sleep_usec = 0, process_usec = 0;
358 size_t left_to_play;
359
360 pa_assert(u);
361 pa_sink_assert_ref(u->sink);
362
363 if (u->use_tsched)
364 hw_sleep_time(u, &max_sleep_usec, &process_usec);
365
366 for (;;) {
367 snd_pcm_sframes_t n;
368 int r;
369
370 snd_pcm_hwsync(u->pcm_handle);
371
372 if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {
373
374 if ((r = try_recover(u, "snd_pcm_avail_update", n)) == 0)
375 continue;
376
377 return r;
378 }
379
380 left_to_play = check_left_to_play(u, n);
381
382 if (u->use_tsched)
383
384 /* We won't fill up the playback buffer before at least
385 * half the sleep time is over because otherwise we might
386 * ask for more data from the clients then they expect. We
387 * need to guarantee that clients only have to keep around
388 * a single hw buffer length. */
389
390 if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
391 break;
392
393 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
394 break;
395
396 n -= u->hwbuf_unused_frames;
397
398 for (;;) {
399 snd_pcm_sframes_t frames;
400 void *p;
401
402 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
403
404 if (u->memchunk.length <= 0)
405 pa_sink_render(u->sink, n * u->frame_size, &u->memchunk);
406
407 pa_assert(u->memchunk.length > 0);
408
409 frames = u->memchunk.length / u->frame_size;
410
411 if (frames > n)
412 frames = n;
413
414 p = pa_memblock_acquire(u->memchunk.memblock);
415 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, frames);
416 pa_memblock_release(u->memchunk.memblock);
417
418 pa_assert(frames != 0);
419
420 if (PA_UNLIKELY(frames < 0)) {
421
422 if ((r = try_recover(u, "snd_pcm_writei", n)) == 0)
423 continue;
424
425 return r;
426 }
427
428 u->memchunk.index += frames * u->frame_size;
429 u->memchunk.length -= frames * u->frame_size;
430
431 if (u->memchunk.length <= 0) {
432 pa_memblock_unref(u->memchunk.memblock);
433 pa_memchunk_reset(&u->memchunk);
434 }
435
436 work_done = 1;
437
438 u->frame_index += frames;
439 u->since_start += frames * u->frame_size;
440
441 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
442
443 if (frames >= n)
444 break;
445
446 n -= frames;
447 }
448 }
449
450 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
451 return work_done;
452 }
453
/* Feed the time smoother with a fresh (system time, playback time)
 * sample pair so latency interpolation stays accurate. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* Alternative implementation via snd_pcm_status(), kept for reference: */
    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames actually played so far = frames written minus frames
     * still queued in the hardware */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
496
497 static pa_usec_t sink_get_latency(struct userdata *u) {
498 pa_usec_t r = 0;
499 int64_t delay;
500 pa_usec_t now1, now2;
501
502 pa_assert(u);
503
504 now1 = pa_rtclock_usec();
505 now2 = pa_smoother_get(u->smoother, now1);
506
507 delay = (int64_t) pa_bytes_to_usec(u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
508
509 if (delay > 0)
510 r = (pa_usec_t) delay;
511
512 if (u->memchunk.memblock)
513 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
514
515 return r;
516 }
517
518 static int build_pollfd(struct userdata *u) {
519 pa_assert(u);
520 pa_assert(u->pcm_handle);
521
522 if (u->alsa_rtpoll_item)
523 pa_rtpoll_item_free(u->alsa_rtpoll_item);
524
525 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
526 return -1;
527
528 return 0;
529 }
530
/* Drain and close the PCM device and free its poll item; called when
 * the sink enters PA_SINK_SUSPENDED. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze the smoother so latency queries while suspended stay consistent */
    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend */
    snd_pcm_drain(u->pcm_handle);
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
551
552 static int update_sw_params(struct userdata *u) {
553 snd_pcm_uframes_t avail_min;
554 int err;
555
556 pa_assert(u);
557
558 /* Use the full buffer if noone asked us for anything specific */
559 u->hwbuf_unused_frames = 0;
560
561 if (u->use_tsched) {
562 pa_usec_t latency;
563
564 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
565 size_t b;
566
567 pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);
568
569 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
570
571 /* We need at least one sample in our buffer */
572
573 if (PA_UNLIKELY(b < u->frame_size))
574 b = u->frame_size;
575
576 u->hwbuf_unused_frames =
577 PA_LIKELY(b < u->hwbuf_size) ?
578 ((u->hwbuf_size - b) / u->frame_size) : 0;
579
580 fix_tsched_watermark(u);
581 }
582 }
583
584 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
585
586 /* We need at last one frame in the used part of the buffer */
587 avail_min = u->hwbuf_unused_frames + 1;
588
589 if (u->use_tsched) {
590 pa_usec_t sleep_usec, process_usec;
591
592 hw_sleep_time(u, &sleep_usec, &process_usec);
593 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
594 }
595
596 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
597
598 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
599 pa_log("Failed to set software parameters: %s", snd_strerror(err));
600 return err;
601 }
602
603 pa_sink_set_max_request(u->sink, u->hwbuf_size - u->hwbuf_unused_frames * u->frame_size);
604
605 return 0;
606 }
607
/* Reopen and reconfigure the PCM device after a suspend; fails if the
 * previous hardware configuration cannot be restored exactly.
 * Returns 0 on success, -1 on failure (PCM handle left NULL). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop cached ALSA configuration in case it changed while suspended */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request exactly the parameters we had before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* Verify the device gave us back what we had before */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Treat this like a fresh start */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
675
/* Message handler executed in the sink's IO thread context: answers
 * latency queries and performs suspend/resume on state changes, then
 * delegates to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
729
/* ALSA mixer element callback: re-read volume and mute when the
 * element's value changes behind our back (e.g. via alsamixer). */
static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    /* Element was removed: nothing to refresh */
    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        /* Force a re-read from the hardware and notify subscribers */
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}
746
/* Read the current hardware volume from the mixer into s->volume.
 * Uses the dB API when supported, raw mixer steps otherwise.
 * Returns 0 on success, -1 on failure. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                /* ALSA reports dB values scaled by 100 */
                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                /* Map the raw mixer range linearly onto [0, PA_VOLUME_NORM] */
                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        long alsa_vol;

        /* The single-channel path is only taken in dB mode */
        pa_assert(u->hw_dB_supported);

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

        pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        u->hardware_volume = s->volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */

            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
813
/* Write s->volume to the hardware mixer, then read back what the
 * hardware actually applied. In dB mode the difference between
 * requested and applied volume is compensated in software.
 * Returns 0 on success, -1 on failure. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->volume.values[i];

            if (u->hw_dB_supported) {

                /* ALSA expects dB values scaled by 100; clamp to the element's range */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back the value the hardware actually settled on */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                /* Map [0, PA_VOLUME_NORM] linearly onto the raw mixer range */
                alsa_vol = (long) round(((double) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* The single-channel path is only taken in dB mode; the loudest
         * channel's volume is applied to all channels */
        pa_assert(u->hw_dB_supported);

        vol = pa_cvolume_max(&s->volume);

        alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

        if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
            goto fail;

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

        pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */

        pa_alsa_volume_divide(&r, &s->volume);
        pa_sink_set_soft_volume(s, &r);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
908
909 static int sink_get_mute_cb(pa_sink *s) {
910 struct userdata *u = s->userdata;
911 int err, sw;
912
913 pa_assert(u);
914 pa_assert(u->mixer_elem);
915
916 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
917 pa_log_error("Unable to get switch: %s", snd_strerror(err));
918 return -1;
919 }
920
921 s->muted = !sw;
922
923 return 0;
924 }
925
926 static int sink_set_mute_cb(pa_sink *s) {
927 struct userdata *u = s->userdata;
928 int err;
929
930 pa_assert(u);
931 pa_assert(u->mixer_elem);
932
933 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
934 pa_log_error("Unable to set switch: %s", snd_strerror(err));
935 return -1;
936 }
937
938 return 0;
939 }
940
941 static void sink_update_requested_latency_cb(pa_sink *s) {
942 struct userdata *u = s->userdata;
943 snd_pcm_sframes_t before;
944 pa_assert(u);
945
946 if (!u->pcm_handle)
947 return;
948
949 before = u->hwbuf_unused_frames;
950 update_sw_params(u);
951
952 /* Let's check whether we now use only a smaller part of the
953 buffer then before. If so, we need to make sure that subsequent
954 rewinds are relative to the new maxium fill level and not to the
955 current fill level. Thus, let's do a full rewind once, to clear
956 things up. */
957
958 if (u->hwbuf_unused_frames > before) {
959 pa_log_debug("Requesting rewind due to latency change.");
960 pa_sink_request_rewind(s, (size_t) -1);
961 }
962 }
963
964 static int process_rewind(struct userdata *u) {
965 snd_pcm_sframes_t unused;
966 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
967 pa_assert(u);
968
969 /* Figure out how much we shall rewind and reset the counter */
970 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
971 u->sink->thread_info.rewind_nbytes = 0;
972
973 if (rewind_nbytes <= 0)
974 goto finish;
975
976 pa_assert(rewind_nbytes > 0);
977 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
978
979 snd_pcm_hwsync(u->pcm_handle);
980 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
981 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror(unused));
982 return -1;
983 }
984
985 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
986
987 if (u->hwbuf_size > unused_nbytes)
988 limit_nbytes = u->hwbuf_size - unused_nbytes;
989 else
990 limit_nbytes = 0;
991
992 if (rewind_nbytes > limit_nbytes)
993 rewind_nbytes = limit_nbytes;
994
995 if (rewind_nbytes > 0) {
996 snd_pcm_sframes_t in_frames, out_frames;
997
998 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
999
1000 in_frames = (snd_pcm_sframes_t) rewind_nbytes / u->frame_size;
1001 pa_log_debug("before: %lu", (unsigned long) in_frames);
1002 if ((out_frames = snd_pcm_rewind(u->pcm_handle, in_frames)) < 0) {
1003 pa_log("snd_pcm_rewind() failed: %s", snd_strerror(out_frames));
1004 return -1;
1005 }
1006 pa_log_debug("after: %lu", (unsigned long) out_frames);
1007
1008 rewind_nbytes = out_frames * u->frame_size;
1009
1010 if (rewind_nbytes <= 0)
1011 pa_log_info("Tried rewind, but was apparently not possible.");
1012 else {
1013 u->frame_index -= out_frames;
1014 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1015 pa_sink_process_rewind(u->sink, rewind_nbytes);
1016
1017 u->after_rewind = TRUE;
1018 return 0;
1019 }
1020 } else
1021 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1022
1023 finish:
1024
1025 pa_sink_process_rewind(u->sink, 0);
1026
1027 return 0;
1028
1029 }
1030
/* Main IO thread: renders audio, writes it to ALSA, programs wakeup
 * timers (timer-based scheduling mode) and reacts to poll events from
 * the device. Runs until told to shut down or an unrecoverable error
 * occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Handle any pending rewind before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* The very first write starts the PCM stream */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* ret == 0 means we received PA_MESSAGE_SHUTDOWN */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                /* Device error: try to recover and restart from scratch */
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1162
1163 int pa__init(pa_module*m) {
1164
1165 pa_modargs *ma = NULL;
1166 struct userdata *u = NULL;
1167 const char *dev_id;
1168 pa_sample_spec ss;
1169 pa_channel_map map;
1170 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1171 snd_pcm_uframes_t period_frames, tsched_frames;
1172 size_t frame_size;
1173 snd_pcm_info_t *pcm_info = NULL;
1174 int err;
1175 const char *name;
1176 char *name_buf = NULL;
1177 pa_bool_t namereg_fail;
1178 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
1179 pa_usec_t usec;
1180 pa_sink_new_data data;
1181
1182 snd_pcm_info_alloca(&pcm_info);
1183
1184 pa_assert(m);
1185
1186 pa_alsa_redirect_errors_inc();
1187
1188 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1189 pa_log("Failed to parse module arguments");
1190 goto fail;
1191 }
1192
1193 ss = m->core->default_sample_spec;
1194 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1195 pa_log("Failed to parse sample specification and channel map");
1196 goto fail;
1197 }
1198
1199 frame_size = pa_frame_size(&ss);
1200
1201 nfrags = m->core->default_n_fragments;
1202 frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1203 if (frag_size <= 0)
1204 frag_size = frame_size;
1205 tsched_size = pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1206 tsched_watermark = pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1207
1208 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1209 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1210 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1211 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1212 pa_log("Failed to parse buffer metrics");
1213 goto fail;
1214 }
1215
1216 hwbuf_size = frag_size * nfrags;
1217 period_frames = frag_size/frame_size;
1218 tsched_frames = tsched_size/frame_size;
1219
1220 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1221 pa_log("Failed to parse mmap argument.");
1222 goto fail;
1223 }
1224
1225 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1226 pa_log("Failed to parse tsched argument.");
1227 goto fail;
1228 }
1229
1230 if (use_tsched && !pa_rtclock_hrtimer()) {
1231 pa_log("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1232 use_tsched = FALSE;
1233 }
1234
1235 u = pa_xnew0(struct userdata, 1);
1236 u->core = m->core;
1237 u->module = m;
1238 m->userdata = u;
1239 u->use_mmap = use_mmap;
1240 u->use_tsched = use_tsched;
1241 u->first = TRUE;
1242 u->since_start = 0;
1243 u->after_rewind = FALSE;
1244 u->rtpoll = pa_rtpoll_new();
1245 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1246 u->alsa_rtpoll_item = NULL;
1247
1248 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1249 usec = pa_rtclock_usec();
1250 pa_smoother_set_time_offset(u->smoother, usec);
1251 pa_smoother_pause(u->smoother, usec);
1252
1253 snd_config_update_free_global();
1254
1255 b = use_mmap;
1256 d = use_tsched;
1257
1258 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1259
1260 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1261 dev_id,
1262 &u->device_name,
1263 &ss, &map,
1264 SND_PCM_STREAM_PLAYBACK,
1265 &nfrags, &period_frames, tsched_frames,
1266 &b, &d)))
1267
1268 goto fail;
1269
1270 } else {
1271
1272 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1273 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1274 &u->device_name,
1275 &ss, &map,
1276 SND_PCM_STREAM_PLAYBACK,
1277 &nfrags, &period_frames, tsched_frames,
1278 &b, &d)))
1279 goto fail;
1280
1281 }
1282
1283 pa_assert(u->device_name);
1284 pa_log_info("Successfully opened device %s.", u->device_name);
1285
1286 if (use_mmap && !b) {
1287 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1288 u->use_mmap = use_mmap = FALSE;
1289 }
1290
1291 if (use_tsched && (!b || !d)) {
1292 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1293 u->use_tsched = use_tsched = FALSE;
1294 }
1295
1296 if (u->use_mmap)
1297 pa_log_info("Successfully enabled mmap() mode.");
1298
1299 if (u->use_tsched)
1300 pa_log_info("Successfully enabled timer-based scheduling mode.");
1301
1302 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1303 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1304 goto fail;
1305 }
1306
1307 /* ALSA might tweak the sample spec, so recalculate the frame size */
1308 frame_size = pa_frame_size(&ss);
1309
1310 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1311 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1312 else {
1313 pa_bool_t found = FALSE;
1314
1315 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1316 found = TRUE;
1317 else {
1318 snd_pcm_info_t *info;
1319
1320 snd_pcm_info_alloca(&info);
1321
1322 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1323 char *md;
1324 int card;
1325
1326 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1327
1328 md = pa_sprintf_malloc("hw:%i", card);
1329
1330 if (strcmp(u->device_name, md))
1331 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1332 found = TRUE;
1333 pa_xfree(md);
1334 }
1335 }
1336 }
1337
1338 if (found)
1339 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1340 found = FALSE;
1341
1342 if (!found) {
1343 snd_mixer_close(u->mixer_handle);
1344 u->mixer_handle = NULL;
1345 }
1346 }
1347
1348 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1349 namereg_fail = TRUE;
1350 else {
1351 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1352 namereg_fail = FALSE;
1353 }
1354
1355 pa_sink_new_data_init(&data);
1356 data.driver = __FILE__;
1357 data.module = m;
1358 pa_sink_new_data_set_name(&data, name);
1359 data.namereg_fail = namereg_fail;
1360 pa_sink_new_data_set_sample_spec(&data, &ss);
1361 pa_sink_new_data_set_channel_map(&data, &map);
1362
1363 pa_alsa_init_proplist(data.proplist, pcm_info);
1364 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1365 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1366 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1367 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1368
1369 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1370 pa_sink_new_data_done(&data);
1371 pa_xfree(name_buf);
1372
1373 if (!u->sink) {
1374 pa_log("Failed to create sink object");
1375 goto fail;
1376 }
1377
1378 u->sink->parent.process_msg = sink_process_msg;
1379 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1380 u->sink->userdata = u;
1381
1382 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1383 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1384
1385 u->frame_size = frame_size;
1386 u->fragment_size = frag_size = period_frames * frame_size;
1387 u->nfragments = nfrags;
1388 u->hwbuf_size = u->fragment_size * nfrags;
1389 u->hwbuf_unused_frames = 0;
1390 u->tsched_watermark = tsched_watermark;
1391 u->frame_index = 0;
1392 u->hw_dB_supported = FALSE;
1393 u->hw_dB_min = u->hw_dB_max = 0;
1394 u->hw_volume_min = u->hw_volume_max = 0;
1395 u->mixer_seperate_channels = FALSE;
1396 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1397
1398 if (use_tsched)
1399 fix_tsched_watermark(u);
1400
1401 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1402 u->sink->thread_info.max_request = u->hwbuf_size;
1403
1404 pa_sink_set_latency_range(u->sink,
1405 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1406 pa_bytes_to_usec(u->hwbuf_size, &ss));
1407
1408 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1409 nfrags, (long unsigned) u->fragment_size,
1410 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1411
1412 if (use_tsched)
1413 pa_log_info("Time scheduling watermark is %0.2fms",
1414 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1415
1416 if (update_sw_params(u) < 0)
1417 goto fail;
1418
1419 pa_memchunk_reset(&u->memchunk);
1420
1421 if (u->mixer_handle) {
1422 pa_assert(u->mixer_elem);
1423
1424 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1425 pa_bool_t suitable = TRUE;
1426
1427 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0) {
1428 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1429 suitable = FALSE;
1430 } else {
1431 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1432 pa_assert(u->hw_volume_min < u->hw_volume_max);
1433 }
1434
1435 if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1436 pa_log_info("Mixer doesn't support dB information.");
1437 else {
1438 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", u->hw_dB_min/100.0, u->hw_dB_max/100.0);
1439 pa_assert(u->hw_dB_min < u->hw_dB_max);
1440 u->hw_dB_supported = TRUE;
1441 }
1442
1443 if (suitable &&
1444 !u->hw_dB_supported &&
1445 u->hw_volume_max - u->hw_volume_min < 3) {
1446
1447 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1448 suitable = FALSE;
1449 }
1450
1451 if (suitable) {
1452 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1453
1454 u->sink->get_volume = sink_get_volume_cb;
1455 u->sink->set_volume = sink_set_volume_cb;
1456 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1457 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1458
1459 } else
1460 pa_log_info("Using software volume control.");
1461 }
1462
1463 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1464 u->sink->get_mute = sink_get_mute_cb;
1465 u->sink->set_mute = sink_set_mute_cb;
1466 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1467 } else
1468 pa_log_info("Using software mute control.");
1469
1470 u->mixer_fdl = pa_alsa_fdlist_new();
1471
1472 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1473 pa_log("Failed to initialize file descriptor monitoring");
1474 goto fail;
1475 }
1476
1477 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1478 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1479 } else
1480 u->mixer_fdl = NULL;
1481
1482 pa_alsa_dump(u->pcm_handle);
1483
1484 if (!(u->thread = pa_thread_new(thread_func, u))) {
1485 pa_log("Failed to create thread.");
1486 goto fail;
1487 }
1488
1489 /* Get initial mixer settings */
1490 if (data.volume_is_set) {
1491 if (u->sink->set_volume)
1492 u->sink->set_volume(u->sink);
1493 } else {
1494 if (u->sink->get_volume)
1495 u->sink->get_volume(u->sink);
1496 }
1497
1498 if (data.muted_is_set) {
1499 if (u->sink->set_mute)
1500 u->sink->set_mute(u->sink);
1501 } else {
1502 if (u->sink->get_mute)
1503 u->sink->get_mute(u->sink);
1504 }
1505
1506 pa_sink_put(u->sink);
1507
1508 pa_modargs_free(ma);
1509
1510 return 0;
1511
1512 fail:
1513
1514 if (ma)
1515 pa_modargs_free(ma);
1516
1517 pa__done(m);
1518
1519 return -1;
1520 }
1521
/* Module teardown: release everything pa__init() set up, in reverse
 * dependency order. This is also called from pa__init()'s failure path,
 * so the function must cope with partially initialized state — every
 * resource is NULL-checked before being freed. */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    if (!(u = m->userdata)) {
        /* pa__init() failed before the userdata was allocated; only the
         * error-redirection refcount taken there needs to be dropped. */
        pa_alsa_redirect_errors_dec();
        return;
    }

    /* Unlink the sink first so no new streams/data are routed to it
     * while we tear things down. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the IO thread to shut down and join it before freeing anything
     * the thread might still be touching. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    /* Drop any partially rendered chunk still buffered by the module. */
    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Stop the stream immediately (discarding queued samples) before
     * closing the PCM handle. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    /* Release ALSA's global configuration cache. */
    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}