]> code.delx.au - pulseaudio/blob - src/modules/module-alsa-sink.c
When returning from a suspend, pass exactly the same flags as originally when the device was first opened.
[pulseaudio] / src / modules / module-alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
54
55 #include "alsa-util.h"
56 #include "module-alsa-sink-symdef.h"
57
58 PA_MODULE_AUTHOR("Lennart Poettering");
59 PA_MODULE_DESCRIPTION("ALSA Sink");
60 PA_MODULE_VERSION(PACKAGE_VERSION);
61 PA_MODULE_LOAD_ONCE(FALSE);
62 PA_MODULE_USAGE(
63 "sink_name=<name for the sink> "
64 "device=<ALSA device> "
65 "device_id=<ALSA card index> "
66 "format=<sample format> "
67 "rate=<sample rate> "
68 "channels=<number of channels> "
69 "channel_map=<channel map> "
70 "fragments=<number of fragments> "
71 "fragment_size=<fragment size> "
72 "mmap=<enable memory mapping?> "
73 "tsched=<enable system timer based scheduling mode?> "
74 "tsched_buffer_size=<buffer size when using timer based scheduling> "
75 "tsched_buffer_watermark=<lower fill watermark>");
76
77 static const char* const valid_modargs[] = {
78 "sink_name",
79 "device",
80 "device_id",
81 "format",
82 "rate",
83 "channels",
84 "channel_map",
85 "fragments",
86 "fragment_size",
87 "mmap",
88 "tsched",
89 "tsched_buffer_size",
90 "tsched_buffer_watermark",
91 NULL
92 };
93
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
99
100 struct userdata {
101 pa_core *core;
102 pa_module *module;
103 pa_sink *sink;
104
105 pa_thread *thread;
106 pa_thread_mq thread_mq;
107 pa_rtpoll *rtpoll;
108
109 snd_pcm_t *pcm_handle;
110
111 pa_alsa_fdlist *mixer_fdl;
112 snd_mixer_t *mixer_handle;
113 snd_mixer_elem_t *mixer_elem;
114 long hw_volume_max, hw_volume_min;
115 long hw_dB_max, hw_dB_min;
116 pa_bool_t hw_dB_supported;
117 pa_bool_t mixer_seperate_channels;
118 pa_cvolume hardware_volume;
119
120 size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
121 unsigned nfragments;
122 pa_memchunk memchunk;
123
124 char *device_name;
125
126 pa_bool_t use_mmap, use_tsched;
127
128 pa_bool_t first, after_rewind;
129
130 pa_rtpoll_item *alsa_rtpoll_item;
131
132 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
133
134 pa_smoother *smoother;
135 int64_t frame_index;
136 uint64_t since_start;
137
138 snd_pcm_sframes_t hwbuf_unused_frames;
139 };
140
141 static void fix_tsched_watermark(struct userdata *u) {
142 size_t max_use;
143 size_t min_sleep, min_wakeup;
144 pa_assert(u);
145
146 max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;
147
148 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
149 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
150
151 if (min_sleep > max_use/2)
152 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
153 if (min_sleep < u->frame_size)
154 min_sleep = u->frame_size;
155
156 if (min_wakeup > max_use/2)
157 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
158 if (min_wakeup < u->frame_size)
159 min_wakeup = u->frame_size;
160
161 if (u->tsched_watermark > max_use-min_sleep)
162 u->tsched_watermark = max_use-min_sleep;
163
164 if (u->tsched_watermark < min_wakeup)
165 u->tsched_watermark = min_wakeup;
166 }
167
168 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
169 pa_usec_t usec, wm;
170
171 pa_assert(sleep_usec);
172 pa_assert(process_usec);
173
174 pa_assert(u);
175
176 usec = pa_sink_get_requested_latency_within_thread(u->sink);
177
178 if (usec == (pa_usec_t) -1)
179 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
180
181 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
182
183 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
184
185 if (usec >= wm) {
186 *sleep_usec = usec - wm;
187 *process_usec = wm;
188 } else
189 *process_usec = *sleep_usec = usec / 2;
190
191 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
192 }
193
194 static int try_recover(struct userdata *u, const char *call, int err) {
195 pa_assert(u);
196 pa_assert(call);
197 pa_assert(err < 0);
198
199 pa_log_debug("%s: %s", call, snd_strerror(err));
200
201 pa_assert(err != -EAGAIN);
202
203 if (err == -EPIPE)
204 pa_log_debug("%s: Buffer underrun!", call);
205
206 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
207 u->first = TRUE;
208 u->since_start = 0;
209 return 0;
210 }
211
212 pa_log("%s: %s", call, snd_strerror(err));
213 return -1;
214 }
215
216 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
217 size_t left_to_play;
218
219 if ((size_t) n*u->frame_size < u->hwbuf_size)
220 left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
221 else
222 left_to_play = 0;
223
224 if (left_to_play > 0) {
225 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
226 } else if (!u->first && !u->after_rewind) {
227 pa_log_info("Underrun!");
228
229 if (u->use_tsched) {
230 size_t old_watermark = u->tsched_watermark;
231
232 u->tsched_watermark *= 2;
233 fix_tsched_watermark(u);
234
235 if (old_watermark != u->tsched_watermark)
236 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
237 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
238 }
239 }
240
241 return left_to_play;
242 }
243
/* Fill the ALSA playback buffer via the mmap interface: render sink
 * data directly into the DMA area until the buffer is sufficiently
 * full. On return *sleep_usec holds how long we may sleep before the
 * next fill is due (sound-card time domain). Returns 1 if any data
 * was written, 0 if not, negative on unrecoverable error. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            /* try_recover() == 0 means the stream was reset; retry */
            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        /* Don't write into the part of the buffer reserved as unused */
        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        /* pa_log_debug("Filling up"); */

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
            snd_pcm_sframes_t sframes;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA area in a fixed memblock so the sink can
             * render straight into it without an extra copy */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            /* Keep our write position and start-up byte counter in sync */
            u->frame_index += (int64_t) frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= (snd_pcm_sframes_t) frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
359
/* Fill the ALSA playback buffer via snd_pcm_writei() (non-mmap path).
 * Data is rendered into u->memchunk and written from there, possibly
 * across several iterations. On return *sleep_usec holds how long we
 * may sleep before the next fill is due. Returns 1 if any data was
 * written, 0 if not, negative on unrecoverable error. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        if (PA_UNLIKELY((n = snd_pcm_avail_update(u->pcm_handle)) < 0)) {

            /* try_recover() == 0 means the stream was reset; retry */
            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
            * half the sleep time is over because otherwise we might
            * ask for more data from the clients then they expect. We
            * need to guarantee that clients only have to keep around
            * a single hw buffer length. */

            if (pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        /* Don't write into the part of the buffer reserved as unused */
        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames))
            break;

        n -= u->hwbuf_unused_frames;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render fresh data only once the previous chunk is used up */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > n)
                frames = n;

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance past the bytes the device accepted */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += (size_t) frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= n)
                break;

            n -= frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
458
/* Feed the time smoother with a fresh (system time, playback time)
 * sample pair, derived from the current ALSA delay. The playback time
 * is the number of frames actually played so far (frames written
 * minus frames still queued in the device). */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames played so far = frames written - frames still queued */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
501
502 static pa_usec_t sink_get_latency(struct userdata *u) {
503 pa_usec_t r = 0;
504 int64_t delay;
505 pa_usec_t now1, now2;
506
507 pa_assert(u);
508
509 now1 = pa_rtclock_usec();
510 now2 = pa_smoother_get(u->smoother, now1);
511
512 delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
513
514 if (delay > 0)
515 r = (pa_usec_t) delay;
516
517 if (u->memchunk.memblock)
518 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
519
520 return r;
521 }
522
523 static int build_pollfd(struct userdata *u) {
524 pa_assert(u);
525 pa_assert(u->pcm_handle);
526
527 if (u->alsa_rtpoll_item)
528 pa_rtpoll_item_free(u->alsa_rtpoll_item);
529
530 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
531 return -1;
532
533 return 0;
534 }
535
536 static int suspend(struct userdata *u) {
537 pa_assert(u);
538 pa_assert(u->pcm_handle);
539
540 pa_smoother_pause(u->smoother, pa_rtclock_usec());
541
542 /* Let's suspend */
543 snd_pcm_drain(u->pcm_handle);
544 snd_pcm_close(u->pcm_handle);
545 u->pcm_handle = NULL;
546
547 if (u->alsa_rtpoll_item) {
548 pa_rtpoll_item_free(u->alsa_rtpoll_item);
549 u->alsa_rtpoll_item = NULL;
550 }
551
552 pa_log_info("Device suspended...");
553
554 return 0;
555 }
556
/* Recompute hwbuf_unused_frames and avail_min from the currently
 * requested latency, push the resulting software parameters to ALSA
 * and update the sink's maximum request size. Returns 0 on success,
 * a negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if noone asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2f", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused_frames = (snd_pcm_sframes_t)
                (PA_LIKELY(b < u->hwbuf_size) ?
                 ((u->hwbuf_size - b) / u->frame_size) : 0);

            fix_tsched_watermark(u);
        }
    }

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at last one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* NOTE(review): sleep_usec (a time) is converted with
         * pa_usec_to_bytes() and added to a frame count here —
         * presumably intentional to delay ALSA wakeups until our own
         * timer fires, but the units deserve a second look. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);

    return 0;
}
612
/* Resume from suspend: reopen the PCM device with the same open flags
 * and verify that the original hardware configuration (sample spec,
 * fragment layout, access mode) could be restored exactly; fail the
 * resume otherwise. Returns 0 on success, -1 on failure (device left
 * closed). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    snd_config_update_free_global();
    /* NOTE(review): these SND_PCM_NO_AUTO_* flags should mirror the
     * flags used when the device was first opened — confirm against
     * pa__init(). */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    /* The following checks make sure the device came back with the
     * exact same configuration we had before the suspend */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings.");
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
684
/* Message handler run in the IO thread: answers latency queries and
 * performs suspend/resume on state transitions, then delegates to the
 * generic sink message handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* A closed device (suspended) reports zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
738
739 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
740 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
741
742 pa_assert(u);
743 pa_assert(u->mixer_handle);
744
745 if (mask == SND_CTL_EVENT_MASK_REMOVE)
746 return 0;
747
748 if (mask & SND_CTL_EVENT_MASK_VALUE) {
749 pa_sink_get_volume(u->sink, TRUE);
750 pa_sink_get_mute(u->sink, TRUE);
751 }
752
753 return 0;
754 }
755
/* Read the current hardware volume from the mixer element into the
 * sink. Uses per-channel dB or raw values depending on driver
 * capabilities; if the hardware volume changed externally, the
 * software volume is reset (dB case only). Returns 0 on success,
 * -1 on mixer errors. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA reports dB in 1/100 dB units */
                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                /* Map the raw hw range linearly onto [0, PA_VOLUME_NORM] */
                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (double) (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        long alsa_vol;

        /* Single-channel path is only taken when dB is available */
        pa_assert(u->hw_dB_supported);

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
        VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

        pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        u->hardware_volume = s->volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */

            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
830
/* Push the sink's requested volume to the hardware mixer, then read
 * back what the hardware actually applied. With dB support the
 * residual between requested and applied volume is compensated in
 * software; without it the sink volume is clamped to what the
 * hardware granted. Returns 0 on success, -1 on mixer errors. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->volume.values[i];

            if (u->hw_dB_supported) {

                /* ALSA expects dB in 1/100 dB units */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back the value the hardware actually took */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = pa_sw_volume_from_dB((double) alsa_vol / 100.0);
            } else {

                /* Map [0, PA_VOLUME_NORM] linearly onto the raw hw range */
                alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) / (double) (u->hw_volume_max - u->hw_volume_min));
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Single-channel path is only taken when dB is available */
        pa_assert(u->hw_dB_supported);

        vol = pa_cvolume_max(&s->volume);

        alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
        alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

        if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
            goto fail;

        if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
            goto fail;

        pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) alsa_vol / 100.0));
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */

        pa_alsa_volume_divide(&r, &s->volume);
        pa_sink_set_soft_volume(s, &r);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
925
926 static int sink_get_mute_cb(pa_sink *s) {
927 struct userdata *u = s->userdata;
928 int err, sw;
929
930 pa_assert(u);
931 pa_assert(u->mixer_elem);
932
933 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
934 pa_log_error("Unable to get switch: %s", snd_strerror(err));
935 return -1;
936 }
937
938 s->muted = !sw;
939
940 return 0;
941 }
942
943 static int sink_set_mute_cb(pa_sink *s) {
944 struct userdata *u = s->userdata;
945 int err;
946
947 pa_assert(u);
948 pa_assert(u->mixer_elem);
949
950 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
951 pa_log_error("Unable to set switch: %s", snd_strerror(err));
952 return -1;
953 }
954
955 return 0;
956 }
957
958 static void sink_update_requested_latency_cb(pa_sink *s) {
959 struct userdata *u = s->userdata;
960 snd_pcm_sframes_t before;
961 pa_assert(u);
962
963 if (!u->pcm_handle)
964 return;
965
966 before = u->hwbuf_unused_frames;
967 update_sw_params(u);
968
969 /* Let's check whether we now use only a smaller part of the
970 buffer then before. If so, we need to make sure that subsequent
971 rewinds are relative to the new maxium fill level and not to the
972 current fill level. Thus, let's do a full rewind once, to clear
973 things up. */
974
975 if (u->hwbuf_unused_frames > before) {
976 pa_log_debug("Requesting rewind due to latency change.");
977 pa_sink_request_rewind(s, (size_t) -1);
978 }
979 }
980
981 static int process_rewind(struct userdata *u) {
982 snd_pcm_sframes_t unused;
983 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
984 pa_assert(u);
985
986 /* Figure out how much we shall rewind and reset the counter */
987 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
988 u->sink->thread_info.rewind_nbytes = 0;
989
990 if (rewind_nbytes <= 0)
991 goto finish;
992
993 pa_assert(rewind_nbytes > 0);
994 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
995
996 snd_pcm_hwsync(u->pcm_handle);
997 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
998 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
999 return -1;
1000 }
1001
1002 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1003
1004 if (u->hwbuf_size > unused_nbytes)
1005 limit_nbytes = u->hwbuf_size - unused_nbytes;
1006 else
1007 limit_nbytes = 0;
1008
1009 if (rewind_nbytes > limit_nbytes)
1010 rewind_nbytes = limit_nbytes;
1011
1012 if (rewind_nbytes > 0) {
1013 snd_pcm_sframes_t in_frames, out_frames;
1014
1015 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1016
1017 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1018 pa_log_debug("before: %lu", (unsigned long) in_frames);
1019 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1020 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
1021 return -1;
1022 }
1023 pa_log_debug("after: %lu", (unsigned long) out_frames);
1024
1025 rewind_nbytes = (size_t) out_frames * u->frame_size;
1026
1027 if (rewind_nbytes <= 0)
1028 pa_log_info("Tried rewind, but was apparently not possible.");
1029 else {
1030 u->frame_index -= out_frames;
1031 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1032 pa_sink_process_rewind(u->sink, rewind_nbytes);
1033
1034 u->after_rewind = TRUE;
1035 return 0;
1036 }
1037 } else
1038 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1039
1040 finish:
1041
1042 pa_sink_process_rewind(u->sink, 0);
1043
1044 return 0;
1045
1046 }
1047
/* The IO thread: renders audio into the ALSA buffer, schedules
 * wakeups (timer-based or poll-based), handles rewinds, recovers from
 * poll errors and processes thread messages until shutdown. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Service any pending rewind before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec);
            else
                work_done = unix_write(u, &sleep_usec);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off playback and
                 * let the smoother run again */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker then we calculate here, probably due to
                     * the transport latency. To accomodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* ret == 0 means we received PA_MESSAGE_SHUTDOWN */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            unsigned short revents = 0;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Error condition on the device: try to recover and
             * restart playback from scratch */
            if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA! (%i)", revents);
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1179
1180 int pa__init(pa_module*m) {
1181
1182 pa_modargs *ma = NULL;
1183 struct userdata *u = NULL;
1184 const char *dev_id;
1185 pa_sample_spec ss;
1186 pa_channel_map map;
1187 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1188 snd_pcm_uframes_t period_frames, tsched_frames;
1189 size_t frame_size;
1190 snd_pcm_info_t *pcm_info = NULL;
1191 int err;
1192 const char *name;
1193 char *name_buf = NULL;
1194 pa_bool_t namereg_fail;
1195 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
1196 pa_usec_t usec;
1197 pa_sink_new_data data;
1198
1199 snd_pcm_info_alloca(&pcm_info);
1200
1201 pa_assert(m);
1202
1203 pa_alsa_redirect_errors_inc();
1204
1205 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1206 pa_log("Failed to parse module arguments");
1207 goto fail;
1208 }
1209
1210 ss = m->core->default_sample_spec;
1211 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1212 pa_log("Failed to parse sample specification and channel map");
1213 goto fail;
1214 }
1215
1216 frame_size = pa_frame_size(&ss);
1217
1218 nfrags = m->core->default_n_fragments;
1219 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1220 if (frag_size <= 0)
1221 frag_size = (uint32_t) frame_size;
1222 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1223 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1224
1225 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1226 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1227 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1228 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1229 pa_log("Failed to parse buffer metrics");
1230 goto fail;
1231 }
1232
1233 hwbuf_size = frag_size * nfrags;
1234 period_frames = frag_size/frame_size;
1235 tsched_frames = tsched_size/frame_size;
1236
1237 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1238 pa_log("Failed to parse mmap argument.");
1239 goto fail;
1240 }
1241
1242 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1243 pa_log("Failed to parse tsched argument.");
1244 goto fail;
1245 }
1246
1247 if (use_tsched && !pa_rtclock_hrtimer()) {
1248 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1249 use_tsched = FALSE;
1250 }
1251
1252 u = pa_xnew0(struct userdata, 1);
1253 u->core = m->core;
1254 u->module = m;
1255 m->userdata = u;
1256 u->use_mmap = use_mmap;
1257 u->use_tsched = use_tsched;
1258 u->first = TRUE;
1259 u->since_start = 0;
1260 u->after_rewind = FALSE;
1261 u->rtpoll = pa_rtpoll_new();
1262 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1263 u->alsa_rtpoll_item = NULL;
1264
1265 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1266 usec = pa_rtclock_usec();
1267 pa_smoother_set_time_offset(u->smoother, usec);
1268 pa_smoother_pause(u->smoother, usec);
1269
1270 snd_config_update_free_global();
1271
1272 b = use_mmap;
1273 d = use_tsched;
1274
1275 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1276
1277 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1278 dev_id,
1279 &u->device_name,
1280 &ss, &map,
1281 SND_PCM_STREAM_PLAYBACK,
1282 &nfrags, &period_frames, tsched_frames,
1283 &b, &d)))
1284
1285 goto fail;
1286
1287 } else {
1288
1289 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1290 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1291 &u->device_name,
1292 &ss, &map,
1293 SND_PCM_STREAM_PLAYBACK,
1294 &nfrags, &period_frames, tsched_frames,
1295 &b, &d)))
1296 goto fail;
1297
1298 }
1299
1300 pa_assert(u->device_name);
1301 pa_log_info("Successfully opened device %s.", u->device_name);
1302
1303 if (use_mmap && !b) {
1304 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1305 u->use_mmap = use_mmap = FALSE;
1306 }
1307
1308 if (use_tsched && (!b || !d)) {
1309 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1310 u->use_tsched = use_tsched = FALSE;
1311 }
1312
1313 if (u->use_mmap)
1314 pa_log_info("Successfully enabled mmap() mode.");
1315
1316 if (u->use_tsched)
1317 pa_log_info("Successfully enabled timer-based scheduling mode.");
1318
1319 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1320 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1321 goto fail;
1322 }
1323
1324 /* ALSA might tweak the sample spec, so recalculate the frame size */
1325 frame_size = pa_frame_size(&ss);
1326
1327 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1328 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1329 else {
1330 pa_bool_t found = FALSE;
1331
1332 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1333 found = TRUE;
1334 else {
1335 snd_pcm_info_t *info;
1336
1337 snd_pcm_info_alloca(&info);
1338
1339 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1340 char *md;
1341 int card;
1342
1343 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1344
1345 md = pa_sprintf_malloc("hw:%i", card);
1346
1347 if (strcmp(u->device_name, md))
1348 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1349 found = TRUE;
1350 pa_xfree(md);
1351 }
1352 }
1353 }
1354
1355 if (found)
1356 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1357 found = FALSE;
1358
1359 if (!found) {
1360 snd_mixer_close(u->mixer_handle);
1361 u->mixer_handle = NULL;
1362 }
1363 }
1364
1365 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1366 namereg_fail = TRUE;
1367 else {
1368 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1369 namereg_fail = FALSE;
1370 }
1371
1372 pa_sink_new_data_init(&data);
1373 data.driver = __FILE__;
1374 data.module = m;
1375 pa_sink_new_data_set_name(&data, name);
1376 data.namereg_fail = namereg_fail;
1377 pa_sink_new_data_set_sample_spec(&data, &ss);
1378 pa_sink_new_data_set_channel_map(&data, &map);
1379
1380 pa_alsa_init_proplist(data.proplist, pcm_info);
1381 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1382 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1383 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1384 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1385
1386 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1387 pa_sink_new_data_done(&data);
1388 pa_xfree(name_buf);
1389
1390 if (!u->sink) {
1391 pa_log("Failed to create sink object");
1392 goto fail;
1393 }
1394
1395 u->sink->parent.process_msg = sink_process_msg;
1396 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1397 u->sink->userdata = u;
1398
1399 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1400 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1401
1402 u->frame_size = frame_size;
1403 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1404 u->nfragments = nfrags;
1405 u->hwbuf_size = u->fragment_size * nfrags;
1406 u->hwbuf_unused_frames = 0;
1407 u->tsched_watermark = tsched_watermark;
1408 u->frame_index = 0;
1409 u->hw_dB_supported = FALSE;
1410 u->hw_dB_min = u->hw_dB_max = 0;
1411 u->hw_volume_min = u->hw_volume_max = 0;
1412 u->mixer_seperate_channels = FALSE;
1413 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1414
1415 if (use_tsched)
1416 fix_tsched_watermark(u);
1417
1418 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1419 u->sink->thread_info.max_request = u->hwbuf_size;
1420
1421 pa_sink_set_latency_range(u->sink,
1422 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1423 pa_bytes_to_usec(u->hwbuf_size, &ss));
1424
1425 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1426 nfrags, (long unsigned) u->fragment_size,
1427 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1428
1429 if (use_tsched)
1430 pa_log_info("Time scheduling watermark is %0.2fms",
1431 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1432
1433 if (update_sw_params(u) < 0)
1434 goto fail;
1435
1436 pa_memchunk_reset(&u->memchunk);
1437
1438 if (u->mixer_handle) {
1439 pa_assert(u->mixer_elem);
1440
1441 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1442 pa_bool_t suitable = TRUE;
1443
1444 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0) {
1445 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1446 suitable = FALSE;
1447 } else {
1448 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1449 pa_assert(u->hw_volume_min < u->hw_volume_max);
1450 }
1451
1452 if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1453 pa_log_info("Mixer doesn't support dB information.");
1454 else {
1455 #ifdef HAVE_VALGRIND_MEMCHECK_H
1456 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1457 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1458 #endif
1459
1460 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1461 pa_assert(u->hw_dB_min < u->hw_dB_max);
1462 u->hw_dB_supported = TRUE;
1463 }
1464
1465 if (suitable &&
1466 !u->hw_dB_supported &&
1467 u->hw_volume_max - u->hw_volume_min < 3) {
1468
1469 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1470 suitable = FALSE;
1471 }
1472
1473 if (suitable) {
1474 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1475
1476 u->sink->get_volume = sink_get_volume_cb;
1477 u->sink->set_volume = sink_set_volume_cb;
1478 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1479 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1480
1481 } else
1482 pa_log_info("Using software volume control.");
1483 }
1484
1485 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1486 u->sink->get_mute = sink_get_mute_cb;
1487 u->sink->set_mute = sink_set_mute_cb;
1488 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1489 } else
1490 pa_log_info("Using software mute control.");
1491
1492 u->mixer_fdl = pa_alsa_fdlist_new();
1493
1494 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1495 pa_log("Failed to initialize file descriptor monitoring");
1496 goto fail;
1497 }
1498
1499 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1500 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1501 } else
1502 u->mixer_fdl = NULL;
1503
1504 pa_alsa_dump(u->pcm_handle);
1505
1506 if (!(u->thread = pa_thread_new(thread_func, u))) {
1507 pa_log("Failed to create thread.");
1508 goto fail;
1509 }
1510
1511 /* Get initial mixer settings */
1512 if (data.volume_is_set) {
1513 if (u->sink->set_volume)
1514 u->sink->set_volume(u->sink);
1515 } else {
1516 if (u->sink->get_volume)
1517 u->sink->get_volume(u->sink);
1518 }
1519
1520 if (data.muted_is_set) {
1521 if (u->sink->set_mute)
1522 u->sink->set_mute(u->sink);
1523 } else {
1524 if (u->sink->get_mute)
1525 u->sink->get_mute(u->sink);
1526 }
1527
1528 pa_sink_put(u->sink);
1529
1530 pa_modargs_free(ma);
1531
1532 return 0;
1533
1534 fail:
1535
1536 if (ma)
1537 pa_modargs_free(ma);
1538
1539 pa__done(m);
1540
1541 return -1;
1542 }
1543
/* Module teardown: release everything pa__init() set up. Safe to call
 * on a partially initialized module (every resource is checked before
 * being freed). The order matters: the sink is unlinked first so the
 * core stops routing to it, then the I/O thread is shut down before
 * any of the state it uses is destroyed. */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    /* pa__init() may fail before allocating userdata; still balance
     * the error-redirect refcount taken there. */
    if (!(u = m->userdata)) {
        pa_alsa_redirect_errors_dec();
        return;
    }

    /* Detach the sink from the core first. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to exit and wait for it; this must happen
     * before freeing the rtpoll/queues the thread is using. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    /* Drop any partially played memory chunk. */
    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Stop the PCM stream (discarding pending frames) before closing it. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);

    snd_config_update_free_global();

    pa_alsa_redirect_errors_dec();
}