/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <math.h> /* for round() */

#include <asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/util.h>
#include <pulse/timeval.h>

#include <pulsecore/core.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/core-error.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/rtclock.h>
#include <pulsecore/time-smoother.h>

#include "alsa-util.h"
#include "module-alsa-sink-symdef.h"

PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark>");

static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    NULL
};

#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* 20ms */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)           /* 3ms */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)          /* 3ms */
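
/* For orientation, a worked example with illustrative figures (not
 * anything this module hardcodes): with S16LE stereo at 44100 Hz a
 * frame is 4 bytes, so the byte rate is 176400 bytes/s. The 20ms
 * default watermark then corresponds to 3528 bytes and the 2s default
 * buffer to 352800 bytes; pa_usec_to_bytes() performs this conversion
 * at runtime for the actual sample spec. */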

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;
    long hw_dB_max, hw_dB_min;
    pa_bool_t hw_dB_supported;
    pa_bool_t mixer_separate_channels;
    pa_cvolume hardware_volume;

    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;
    int64_t frame_index;
    uint64_t since_start;

    snd_pcm_sframes_t hwbuf_unused_frames;
};

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    size_t min_sleep, min_wakeup;
    pa_assert(u);

    max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;

    min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);

    if (min_sleep > max_use/2)
        min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_sleep < u->frame_size)
        min_sleep = u->frame_size;

    if (min_wakeup > max_use/2)
        min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_wakeup < u->frame_size)
        min_wakeup = u->frame_size;

    if (u->tsched_watermark > max_use-min_sleep)
        u->tsched_watermark = max_use-min_sleep;

    if (u->tsched_watermark < min_wakeup)
        u->tsched_watermark = min_wakeup;
}
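
/* Net effect of the clamping above: the watermark ends up roughly in
 * [min_wakeup, max_use - min_sleep], i.e. we keep at least
 * TSCHED_MIN_WAKEUP_USEC worth of audible data below the watermark
 * and at least TSCHED_MIN_SLEEP_USEC of fillable headroom above it,
 * degrading gracefully when the usable buffer is tiny. */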

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (usec >= wm) {
        *sleep_usec = usec - wm;
        *process_usec = wm;
    } else
        *process_usec = *sleep_usec = usec / 2;

    /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
}
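
/* Example of the split computed above (illustrative numbers): with a
 * requested latency of 100ms and a 20ms watermark we sleep for 80ms
 * and keep 20ms as processing budget; a request smaller than the
 * watermark is simply split in half between the two. */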

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, snd_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
        u->first = TRUE;
        u->since_start = 0;
        return 0;
    }

    pa_log("%s: %s", call, snd_strerror(err));
    return -1;
}

static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
    size_t left_to_play;

    if ((size_t) n*u->frame_size < u->hwbuf_size)
        left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
    else
        left_to_play = 0;

    if (left_to_play > 0) {
        /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
    } else if (!u->first && !u->after_rewind) {
        pa_log_info("Underrun!");

        if (u->use_tsched) {
            size_t old_watermark = u->tsched_watermark;

            u->tsched_watermark *= 2;
            fix_tsched_watermark(u);

            if (old_watermark != u->tsched_watermark)
                pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                              (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        }
    }

    return left_to_play;
}
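
/* Note that on underrun the watermark is doubled and then re-clamped
 * by fix_tsched_watermark(), so a device that keeps underrunning
 * converges on a watermark near the upper clamp instead of growing
 * without bound. */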

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        /* pa_log_debug("Filling up"); */

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
            snd_pcm_sframes_t sframes;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bits */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            u->frame_index += (int64_t) frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= (snd_pcm_sframes_t) frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
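
/* The inner loop above follows the standard ALSA mmap transfer
 * protocol: snd_pcm_avail_update() refreshes the pointers,
 * snd_pcm_mmap_begin() maps a writable region, the sink renders
 * directly into that region, and snd_pcm_mmap_commit() publishes the
 * frames. A minimal standalone sketch of the same protocol for an
 * interleaved stream (error handling elided):
 *
 *     const snd_pcm_channel_area_t *areas;
 *     snd_pcm_uframes_t offset, frames = n;
 *     snd_pcm_mmap_begin(pcm, &areas, &offset, &frames);
 *     memcpy((uint8_t*) areas[0].addr + offset * frame_size,
 *            data, frames * frame_size);
 *     snd_pcm_mmap_commit(pcm, offset, frames);
 */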

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > n)
                frames = n;

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += (size_t) frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= n)
                break;

            n -= frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
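
/* unix_write() is the non-mmap fallback for the same fill logic:
 * instead of rendering straight into the mapped DMA buffer it renders
 * into an intermediate pa_memchunk and pushes that with
 * snd_pcm_writei(), at the cost of one extra copy per chunk. */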

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
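
/* The smoother is thus fed pairs of (system time, card time): now1 is
 * the monotonic system clock, now2 the amount of audio the card has
 * actually played so far (frames written minus frames still queued in
 * the hardware buffer, expressed as time). Later queries interpolate
 * the card clock between such measurements. */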

static pa_usec_t sink_get_latency(struct userdata *u) {
    pa_usec_t r = 0;
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_usec();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;

    if (delay > 0)
        r = (pa_usec_t) delay;

    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
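
/* In other words, latency = (audio written so far) - (audio played so
 * far, as estimated by the smoother), plus whatever is still sitting
 * in the partially written memchunk in the non-mmap case. */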

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend */
    snd_pcm_drain(u->pcm_handle);
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}

static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused_frames = (snd_pcm_sframes_t)
                (PA_LIKELY(b < u->hwbuf_size) ?
                 ((u->hwbuf_size - b) / u->frame_size) : 0);

            fix_tsched_watermark(u);
        }
    }

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);

        /* avail_min counts frames, so convert the sleep time to
         * frames, not bytes */
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);

    return 0;
}
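
/* avail_min tells ALSA how much free space must accumulate before a
 * poll() on the PCM descriptors wakes us. In timer-based mode it is
 * pushed up by the expected sleep time so that our timer, not the
 * sound IRQ, is what normally wakes the thread. */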

static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}

static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, TRUE);
        pa_sink_get_mute(u->sink, TRUE);
    }

    return 0;
}

static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_separate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (double) (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        long alsa_vol;

        pa_assert(u->hw_dB_supported);

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
        VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

        pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        u->hardware_volume = s->volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */

            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}

static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_separate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->volume.values[i];

            if (u->hw_dB_supported) {

                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (double) (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        pa_assert(u->hw_dB_supported);

        vol = pa_cvolume_max(&s->volume);

        alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

        if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
            goto fail;

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

        pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */

        pa_alsa_volume_divide(&r, &s->volume);
        pa_sink_set_soft_volume(s, &r);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
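
/* The dB path above writes the requested volume, reads back what the
 * mixer actually granted, and then (via pa_alsa_volume_divide()) sets
 * the sink's software volume to the remaining quotient, so hardware
 * steps plus software scaling together match the request exactly. */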

static int sink_get_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err, sw;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
        pa_log_error("Unable to get switch: %s", snd_strerror(err));
        return -1;
    }

    s->muted = !sw;

    return 0;
}

static int sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
        pa_log_error("Unable to set switch: %s", snd_strerror(err));
        return -1;
    }

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    snd_pcm_sframes_t before;
    pa_assert(u);

    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused_frames;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to
       the current fill level. Thus, let's do a full rewind once, to
       clear things up. */

    if (u->hwbuf_unused_frames > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}

static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but it was apparently not possible.");
        else {
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    pa_sink_process_rewind(u->sink, 0);

    return 0;
}
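
/* Rewind budget sketch with the illustrative figures from above: for
 * a 352800 byte hw buffer, a 3528 byte watermark and 100000 bytes
 * reported free by snd_pcm_avail_update(), unused_nbytes is 103528
 * and at most 352800 - 103528 = 249272 bytes may be rewound, i.e. we
 * never touch the watermark's worth of data just in front of the
 * hardware read pointer. */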

static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    /* pa_log_debug("Cutting sleep time for the initial iterations by half."); */
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
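
/* The thread's steady state in timer mode is therefore: fill the
 * buffer, derive the next wakeup from the card clock, translate it to
 * the system clock through the smoother, sleep on the rtpoll timer,
 * and keep the PCM pollfd armed only as a safety net for underruns
 * and error conditions. */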

int pa__init(pa_module*m) {

    pa_modargs *ma = NULL;
    struct userdata *u = NULL;
    const char *dev_id;
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, tsched_frames;
    size_t frame_size;
    snd_pcm_info_t *pcm_info = NULL;
    int err;
    const char *name;
    char *name_buf = NULL;
    pa_bool_t namereg_fail;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
    pa_usec_t usec;
    pa_sink_new_data data;

    snd_pcm_info_alloca(&pcm_info);

    pa_assert(m);

    pa_alsa_redirect_errors_inc();

    if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
        pa_log("Failed to parse module arguments");
        goto fail;
    }

    ss = m->core->default_sample_spec;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    hwbuf_size = frag_size * nfrags;
    period_frames = frag_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (use_tsched && !pa_rtclock_hrtimer()) {
        pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
        use_tsched = FALSE;
    }
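
    /* Timer-based scheduling relies on high-resolution kernel timers
     * for precise wakeups; without them we fall back to classic
     * period-interrupt (sound IRQ) driven scheduling. */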

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    m->userdata = u;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->since_start = 0;
    u->after_rewind = FALSE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
    u->alsa_rtpoll_item = NULL;

    u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
    usec = pa_rtclock_usec();
    pa_smoother_set_time_offset(u->smoother, usec);
    pa_smoother_pause(u->smoother, usec);

    snd_config_update_free_global();

    b = use_mmap;
    d = use_tsched;

    if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(u->pcm_handle = pa_alsa_open_by_device_id(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d)))

            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &nfrags, &period_frames, tsched_frames,
                      &b, &d)))
            goto fail;

    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
        pa_log("Error fetching PCM info: %s", snd_strerror(err));
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
        pa_log_warn("Error opening mixer: %s", snd_strerror(err));
    else {
        pa_bool_t found = FALSE;

        if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
            found = TRUE;
        else {
            snd_pcm_info_t *info;

            snd_pcm_info_alloca(&info);

            if (snd_pcm_info(u->pcm_handle, info) >= 0) {
                char *md;
                int card;

                if ((card = snd_pcm_info_get_card(info)) >= 0) {

                    md = pa_sprintf_malloc("hw:%i", card);

                    if (strcmp(u->device_name, md))
                        if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
                            found = TRUE;
                    pa_xfree(md);
                }
            }
        }

        if (found)
            if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
                found = FALSE;

        if (!found) {
            snd_mixer_close(u->mixer_handle);
            u->mixer_handle = NULL;
        }
    }

    if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
        namereg_fail = TRUE;
    else {
        name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
        namereg_fail = FALSE;
    }

    pa_sink_new_data_init(&data);
    data.driver = __FILE__;
    data.module = m;
    pa_sink_new_data_set_name(&data, name);
    data.namereg_fail = namereg_fail;
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist(data.proplist, pcm_info);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);
    pa_xfree(name_buf);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
    u->nfragments = nfrags;
    u->hwbuf_size = u->fragment_size * nfrags;
    u->hwbuf_unused_frames = 0;
    u->tsched_watermark = tsched_watermark;
    u->frame_index = 0;
    u->hw_dB_supported = FALSE;
    u->hw_dB_min = u->hw_dB_max = 0;
    u->hw_volume_min = u->hw_volume_max = 0;
    u->mixer_separate_channels = FALSE;
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    if (use_tsched)
        fix_tsched_watermark(u);

    u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
    u->sink->thread_info.max_request = u->hwbuf_size;

    pa_sink_set_latency_range(u->sink,
                              !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
                              pa_bytes_to_usec(u->hwbuf_size, &ss));

    pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
                nfrags, (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (use_tsched)
        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);

    if (update_sw_params(u) < 0)
        goto fail;

    pa_memchunk_reset(&u->memchunk);

    if (u->mixer_handle) {
        pa_assert(u->mixer_elem);

        if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
            pa_bool_t suitable = TRUE;

            if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0) {
                pa_log_info("Failed to get volume range. Falling back to software volume control.");
                suitable = FALSE;
            } else {
                pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
                pa_assert(u->hw_volume_min < u->hw_volume_max);
            }

            if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
                pa_log_info("Mixer doesn't support dB information.");
            else {
#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
                VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
#endif

                pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
                pa_assert(u->hw_dB_min < u->hw_dB_max);
                u->hw_dB_supported = TRUE;
            }

            if (suitable &&
                !u->hw_dB_supported &&
                u->hw_volume_max - u->hw_volume_min < 3) {

                pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
                suitable = FALSE;
            }

            if (suitable) {
                u->mixer_separate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;

                u->sink->get_volume = sink_get_volume_cb;
                u->sink->set_volume = sink_set_volume_cb;
                u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
                pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");

            } else
                pa_log_info("Using software volume control.");
        }

        if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
            u->sink->get_mute = sink_get_mute_cb;
            u->sink->set_mute = sink_set_mute_cb;
            u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
        } else
            pa_log_info("Using software mute control.");

        u->mixer_fdl = pa_alsa_fdlist_new();

        if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
            pa_log("Failed to initialize file descriptor monitoring");
            goto fail;
        }

        snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
        snd_mixer_elem_set_callback_private(u->mixer_elem, u);
    } else
        u->mixer_fdl = NULL;

    pa_alsa_dump(u->pcm_handle);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (data.muted_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute)
            u->sink->get_mute(u->sink);
    }

    pa_sink_put(u->sink);

    pa_modargs_free(ma);

    return 0;

fail:

    if (ma)
        pa_modargs_free(ma);

    pa__done(m);

    return -1;
}

void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    if (!(u = m->userdata)) {
        pa_alsa_redirect_errors_dec();
        return;
    }

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}