src/modules/module-alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
54
55 #include "alsa-util.h"
56 #include "module-alsa-sink-symdef.h"
57
58 PA_MODULE_AUTHOR("Lennart Poettering");
59 PA_MODULE_DESCRIPTION("ALSA Sink");
60 PA_MODULE_VERSION(PACKAGE_VERSION);
61 PA_MODULE_LOAD_ONCE(FALSE);
62 PA_MODULE_USAGE(
63 "sink_name=<name for the sink> "
64 "device=<ALSA device> "
65 "device_id=<ALSA card index> "
66 "format=<sample format> "
67 "rate=<sample rate> "
68 "channels=<number of channels> "
69 "channel_map=<channel map> "
70 "fragments=<number of fragments> "
71 "fragment_size=<fragment size> "
72 "mmap=<enable memory mapping?> "
73 "tsched=<enable system timer based scheduling mode?> "
74 "tsched_buffer_size=<buffer size when using timer based scheduling> "
75 "tsched_buffer_watermark=<lower fill watermark>");
76
77 static const char* const valid_modargs[] = {
78 "sink_name",
79 "device",
80 "device_id",
81 "format",
82 "rate",
83 "channels",
84 "channel_map",
85 "fragments",
86 "fragment_size",
87 "mmap",
88 "tsched",
89 "tsched_buffer_size",
90 "tsched_buffer_watermark",
91 NULL
92 };
93
94 #define DEFAULT_DEVICE "default"
95 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
96 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
97 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
98 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
99
100 struct userdata {
101 pa_core *core;
102 pa_module *module;
103 pa_sink *sink;
104
105 pa_thread *thread;
106 pa_thread_mq thread_mq;
107 pa_rtpoll *rtpoll;
108
109 snd_pcm_t *pcm_handle;
110
111 pa_alsa_fdlist *mixer_fdl;
112 snd_mixer_t *mixer_handle;
113 snd_mixer_elem_t *mixer_elem;
114 long hw_volume_max, hw_volume_min;
115 long hw_dB_max, hw_dB_min;
116 pa_bool_t hw_dB_supported;
117 pa_bool_t mixer_seperate_channels;
118 pa_cvolume hardware_volume;
119
120 size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
121 unsigned nfragments;
122 pa_memchunk memchunk;
123
124 char *device_name;
125
126 pa_bool_t use_mmap, use_tsched;
127
128 pa_bool_t first, after_rewind;
129
130 pa_rtpoll_item *alsa_rtpoll_item;
131
132 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
133
134 pa_smoother *smoother;
135 int64_t frame_index;
136 uint64_t since_start;
137
138 snd_pcm_sframes_t hwbuf_unused_frames;
139 };
140
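/* Clamp the timer-based scheduling watermark into the usable part of the
 * hardware buffer: keep at least one minimum sleep interval of free space
 * below "full", and never let the watermark drop below one minimum wakeup
 * interval (both at least one frame). */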
141 static void fix_tsched_watermark(struct userdata *u) {
142 size_t max_use;
143 size_t min_sleep, min_wakeup;
144 pa_assert(u);
145
146 max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;
147
148 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
149 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
150
151 if (min_sleep > max_use/2)
152 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
153 if (min_sleep < u->frame_size)
154 min_sleep = u->frame_size;
155
156 if (min_wakeup > max_use/2)
157 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
158 if (min_wakeup < u->frame_size)
159 min_wakeup = u->frame_size;
160
161 if (u->tsched_watermark > max_use-min_sleep)
162 u->tsched_watermark = max_use-min_sleep;
163
164 if (u->tsched_watermark < min_wakeup)
165 u->tsched_watermark = min_wakeup;
166 }
167
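/* Derive how long the timer may sleep and how much time to reserve for
 * processing before the next refill, based on the requested sink latency
 * (or the full hardware buffer time if nothing was requested) and the
 * wakeup watermark. */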
168 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
169 pa_usec_t usec, wm;
170
171 pa_assert(sleep_usec);
172 pa_assert(process_usec);
173
174 pa_assert(u);
175
176 usec = pa_sink_get_requested_latency_within_thread(u->sink);
177
178 if (usec == (pa_usec_t) -1)
179 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
180
181 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
182
183 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
184
185 if (usec >= wm) {
186 *sleep_usec = usec - wm;
187 *process_usec = wm;
188 } else
189 *process_usec = *sleep_usec = usec / 2;
190
191 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
192 }
193
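/* Try to recover from an ALSA error (e.g. an -EPIPE underrun) via
 * snd_pcm_recover(). On success, reset our stream bookkeeping so playback
 * is restarted cleanly; returns 0 on success, -1 if recovery failed. */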
194 static int try_recover(struct userdata *u, const char *call, int err) {
195 pa_assert(u);
196 pa_assert(call);
197 pa_assert(err < 0);
198
199 pa_log_debug("%s: %s", call, snd_strerror(err));
200
201 pa_assert(err != -EAGAIN);
202
203 if (err == -EPIPE)
204 pa_log_debug("%s: Buffer underrun!", call);
205
206 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
207 u->first = TRUE;
208 u->since_start = 0;
209 return 0;
210 }
211
212 pa_log("%s: %s", call, snd_strerror(err));
213 return -1;
214 }
215
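/* Given the number of writable frames ALSA reported, compute how many bytes
 * are still queued for playback. If the buffer ran dry while we were
 * supposed to be playing, treat it as an underrun and, in timer-based
 * scheduling mode, double the wakeup watermark. */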
216 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
217 size_t left_to_play;
218
219 if ((size_t) n*u->frame_size < u->hwbuf_size)
220 left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
221 else
222 left_to_play = 0;
223
224 if (left_to_play > 0) {
225 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
226 } else if (!u->first && !u->after_rewind) {
227 pa_log_info("Underrun!");
228
229 if (u->use_tsched) {
230 size_t old_watermark = u->tsched_watermark;
231
232 u->tsched_watermark *= 2;
233 fix_tsched_watermark(u);
234
235 if (old_watermark != u->tsched_watermark)
236 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
237 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
238 }
239 }
240
241 return left_to_play;
242 }
243
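/* Fill the hardware buffer in mmap mode: wrap the DMA area returned by
 * snd_pcm_mmap_begin() in a memblock, render the sink directly into it and
 * commit it, repeating until the buffer is sufficiently full. Returns 1 if
 * anything was written, 0 if not, or a negative value if recovery failed;
 * also reports how long the caller may sleep. */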
244 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
245 int work_done = 0;
246 pa_usec_t max_sleep_usec = 0, process_usec = 0;
247 size_t left_to_play;
248
249 pa_assert(u);
250 pa_sink_assert_ref(u->sink);
251
252 if (u->use_tsched)
253 hw_sleep_time(u, &max_sleep_usec, &process_usec);
254
255 for (;;) {
256 snd_pcm_sframes_t n;
257 int r;
258
259 snd_pcm_hwsync(u->pcm_handle);
260
261 /* First we determine how many samples are missing to fill the
262 * buffer up to 100% */
263
264 if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
265
266 if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
267 continue;
268
269 return r;
270 }
271
272 left_to_play = check_left_to_play(u, n);
273
274 if (u->use_tsched)
275
276 /* We won't fill up the playback buffer before at least
277 * half the sleep time is over because otherwise we might
278 * ask for more data from the clients than they expect. We
279 * need to guarantee that clients only have to keep around
280 * a single hw buffer length. */
281
282 if (!polled &&
283 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
284 break;
285
286 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {
287
288 if (polled)
289 pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
290 "Most likely this is an ALSA driver bug. Please report this issue to the PulseAudio developers.");
291
292 break;
293 }
294
295 n -= u->hwbuf_unused_frames;
296
297 polled = FALSE;
298
299 /* pa_log_debug("Filling up"); */
300
301 for (;;) {
302 pa_memchunk chunk;
303 void *p;
304 int err;
305 const snd_pcm_channel_area_t *areas;
306 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
307 snd_pcm_sframes_t sframes;
308
309 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
310
311 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
312
313 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
314 continue;
315
316 return r;
317 }
318
319 /* Make sure that if these memblocks need to be copied they will fit into one slot */
320 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
321 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
322
323 /* Check these are multiples of 8 bit */
324 pa_assert((areas[0].first & 7) == 0);
325 pa_assert((areas[0].step & 7)== 0);
326
327 /* We assume a single interleaved memory buffer */
328 pa_assert((areas[0].first >> 3) == 0);
329 pa_assert((areas[0].step >> 3) == u->frame_size);
330
331 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
332
333 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
334 chunk.length = pa_memblock_get_length(chunk.memblock);
335 chunk.index = 0;
336
337 pa_sink_render_into_full(u->sink, &chunk);
338
339 /* FIXME: Maybe we can do something to keep this memory block
340 * around a little bit longer? */
341 pa_memblock_unref_fixed(chunk.memblock);
342
343 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
344
345 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
346 continue;
347
348 return r;
349 }
350
351 work_done = 1;
352
353 u->frame_index += (int64_t) frames;
354 u->since_start += frames * u->frame_size;
355
356 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
357
358 if (frames >= (snd_pcm_uframes_t) n)
359 break;
360
361 n -= (snd_pcm_sframes_t) frames;
362 }
363 }
364
365 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
366 return work_done;
367 }
368
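/* Same as mmap_write(), but for the plain read/write access mode: render
 * the sink into an intermediate memchunk and hand it to snd_pcm_writei(). */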
369 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
370 int work_done = 0;
371 pa_usec_t max_sleep_usec = 0, process_usec = 0;
372 size_t left_to_play;
373
374 pa_assert(u);
375 pa_sink_assert_ref(u->sink);
376
377 if (u->use_tsched)
378 hw_sleep_time(u, &max_sleep_usec, &process_usec);
379
380 for (;;) {
381 snd_pcm_sframes_t n;
382 int r;
383
384 snd_pcm_hwsync(u->pcm_handle);
385
386 if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
387
388 if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
389 continue;
390
391 return r;
392 }
393
394 left_to_play = check_left_to_play(u, n);
395
396 if (u->use_tsched)
397
398 /* We won't fill up the playback buffer before at least
399 * half the sleep time is over because otherwise we might
400 * ask for more data from the clients than they expect. We
401 * need to guarantee that clients only have to keep around
402 * a single hw buffer length. */
403
404 if (!polled &&
405 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
406 break;
407
408 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {
409
410 if (polled)
411 pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
412 "Most likely this is an ALSA driver bug. Please report this issue to the PulseAudio developers.");
413
414 break;
415 }
416
417 n -= u->hwbuf_unused_frames;
418
419 polled = FALSE;
420
421 for (;;) {
422 snd_pcm_sframes_t frames;
423 void *p;
424
425 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
426
427 if (u->memchunk.length <= 0)
428 pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);
429
430 pa_assert(u->memchunk.length > 0);
431
432 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
433
434 if (frames > n)
435 frames = n;
436
437 p = pa_memblock_acquire(u->memchunk.memblock);
438 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
439 pa_memblock_release(u->memchunk.memblock);
440
441 pa_assert(frames != 0);
442
443 if (PA_UNLIKELY(frames < 0)) {
444
445 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
446 continue;
447
448 return r;
449 }
450
451 u->memchunk.index += (size_t) frames * u->frame_size;
452 u->memchunk.length -= (size_t) frames * u->frame_size;
453
454 if (u->memchunk.length <= 0) {
455 pa_memblock_unref(u->memchunk.memblock);
456 pa_memchunk_reset(&u->memchunk);
457 }
458
459 work_done = 1;
460
461 u->frame_index += frames;
462 u->since_start += (size_t) frames * u->frame_size;
463
464 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
465
466 if (frames >= n)
467 break;
468
469 n -= frames;
470 }
471 }
472
473 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
474 return work_done;
475 }
476
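/* Feed the time smoother a new sample pair: the current system time and the
 * sound card playback time derived from our write counter minus the delay
 * reported by snd_pcm_delay(). */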
477 static void update_smoother(struct userdata *u) {
478 snd_pcm_sframes_t delay = 0;
479 int64_t frames;
480 int err;
481 pa_usec_t now1, now2;
482 /* struct timeval timestamp; */
483 snd_pcm_status_t *status;
484
485 snd_pcm_status_alloca(&status);
486
487 pa_assert(u);
488 pa_assert(u->pcm_handle);
489
490 /* Let's update the time smoother */
491
492 snd_pcm_hwsync(u->pcm_handle);
493 snd_pcm_avail_update(u->pcm_handle);
494
495 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
496 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
497 /* return; */
498 /* } */
499
500 /* delay = snd_pcm_status_get_delay(status); */
501
502 if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
503 pa_log("Failed to query DSP status data: %s", snd_strerror(err));
504 return;
505 }
506
507 frames = u->frame_index - delay;
508
509 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
510
511 /* snd_pcm_status_get_tstamp(status, &timestamp); */
512 /* pa_rtclock_from_wallclock(&timestamp); */
513 /* now1 = pa_timeval_load(&timestamp); */
514
515 now1 = pa_rtclock_usec();
516 now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
517 pa_smoother_put(u->smoother, now1, now2);
518 }
519
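/* Estimate the current sink latency: the difference between what we have
 * written so far and what the smoother says has been played back, plus
 * whatever is still sitting in our staging memchunk. */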
520 static pa_usec_t sink_get_latency(struct userdata *u) {
521 pa_usec_t r = 0;
522 int64_t delay;
523 pa_usec_t now1, now2;
524
525 pa_assert(u);
526
527 now1 = pa_rtclock_usec();
528 now2 = pa_smoother_get(u->smoother, now1);
529
530 delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
531
532 if (delay > 0)
533 r = (pa_usec_t) delay;
534
535 if (u->memchunk.memblock)
536 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
537
538 return r;
539 }
540
541 static int build_pollfd(struct userdata *u) {
542 pa_assert(u);
543 pa_assert(u->pcm_handle);
544
545 if (u->alsa_rtpoll_item)
546 pa_rtpoll_item_free(u->alsa_rtpoll_item);
547
548 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
549 return -1;
550
551 return 0;
552 }
553
554 static int suspend(struct userdata *u) {
555 pa_assert(u);
556 pa_assert(u->pcm_handle);
557
558 pa_smoother_pause(u->smoother, pa_rtclock_usec());
559
560 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
561 * take awfully long with our long buffer sizes today. */
562 snd_pcm_close(u->pcm_handle);
563 u->pcm_handle = NULL;
564
565 if (u->alsa_rtpoll_item) {
566 pa_rtpoll_item_free(u->alsa_rtpoll_item);
567 u->alsa_rtpoll_item = NULL;
568 }
569
570 pa_log_info("Device suspended...");
571
572 return 0;
573 }
574
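/* Recompute the ALSA software parameters: how much of the hardware buffer we
 * leave unused in order to honour the requested latency (timer-based
 * scheduling only), and the avail_min threshold ALSA uses to wake us up.
 * Also updates the sink's maximum request size accordingly. */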
575 static int update_sw_params(struct userdata *u) {
576 snd_pcm_uframes_t avail_min;
577 int err;
578
579 pa_assert(u);
580
581 /* Use the full buffer if no one asked us for anything specific */
582 u->hwbuf_unused_frames = 0;
583
584 if (u->use_tsched) {
585 pa_usec_t latency;
586
587 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
588 size_t b;
589
590 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
591
592 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
593
594 /* We need at least one sample in our buffer */
595
596 if (PA_UNLIKELY(b < u->frame_size))
597 b = u->frame_size;
598
599 u->hwbuf_unused_frames = (snd_pcm_sframes_t)
600 (PA_LIKELY(b < u->hwbuf_size) ?
601 ((u->hwbuf_size - b) / u->frame_size) : 0);
602
603 fix_tsched_watermark(u);
604 }
605 }
606
607 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
608
609 /* We need at least one frame in the used part of the buffer */
610 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;
611
612 if (u->use_tsched) {
613 pa_usec_t sleep_usec, process_usec;
614
615 hw_sleep_time(u, &sleep_usec, &process_usec);
616 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec);
617 }
618
619 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
620
621 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
622 pa_log("Failed to set software parameters: %s", snd_strerror(err));
623 return err;
624 }
625
626 pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);
627
628 return 0;
629 }
630
631 static int unsuspend(struct userdata *u) {
632 pa_sample_spec ss;
633 int err;
634 pa_bool_t b, d;
635 unsigned nfrags;
636 snd_pcm_uframes_t period_size;
637
638 pa_assert(u);
639 pa_assert(!u->pcm_handle);
640
641 pa_log_info("Trying resume...");
642
643 snd_config_update_free_global();
644 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
645 /*SND_PCM_NONBLOCK|*/
646 SND_PCM_NO_AUTO_RESAMPLE|
647 SND_PCM_NO_AUTO_CHANNELS|
648 SND_PCM_NO_AUTO_FORMAT)) < 0) {
649 pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
650 goto fail;
651 }
652
653 ss = u->sink->sample_spec;
654 nfrags = u->nfragments;
655 period_size = u->fragment_size / u->frame_size;
656 b = u->use_mmap;
657 d = u->use_tsched;
658
659 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
660 pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
661 goto fail;
662 }
663
664 if (b != u->use_mmap || d != u->use_tsched) {
665 pa_log_warn("Resume failed, couldn't get original access mode.");
666 goto fail;
667 }
668
669 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
670 pa_log_warn("Resume failed, couldn't restore original sample settings.");
671 goto fail;
672 }
673
674 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
675 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
676 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
677 (unsigned long) nfrags, period_size * u->frame_size);
678 goto fail;
679 }
680
681 if (update_sw_params(u) < 0)
682 goto fail;
683
684 if (build_pollfd(u) < 0)
685 goto fail;
686
687 /* FIXME: We need to reload the volume somehow */
688
689 u->first = TRUE;
690 u->since_start = 0;
691
692 pa_log_info("Resumed successfully...");
693
694 return 0;
695
696 fail:
697 if (u->pcm_handle) {
698 snd_pcm_close(u->pcm_handle);
699 u->pcm_handle = NULL;
700 }
701
702 return -1;
703 }
704
705 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
706 struct userdata *u = PA_SINK(o)->userdata;
707
708 switch (code) {
709
710 case PA_SINK_MESSAGE_GET_LATENCY: {
711 pa_usec_t r = 0;
712
713 if (u->pcm_handle)
714 r = sink_get_latency(u);
715
716 *((pa_usec_t*) data) = r;
717
718 return 0;
719 }
720
721 case PA_SINK_MESSAGE_SET_STATE:
722
723 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
724
725 case PA_SINK_SUSPENDED:
726 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
727
728 if (suspend(u) < 0)
729 return -1;
730
731 break;
732
733 case PA_SINK_IDLE:
734 case PA_SINK_RUNNING:
735
736 if (u->sink->thread_info.state == PA_SINK_INIT) {
737 if (build_pollfd(u) < 0)
738 return -1;
739 }
740
741 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
742 if (unsuspend(u) < 0)
743 return -1;
744 }
745
746 break;
747
748 case PA_SINK_UNLINKED:
749 case PA_SINK_INIT:
750 ;
751 }
752
753 break;
754 }
755
756 return pa_sink_process_msg(o, code, data, offset, chunk);
757 }
758
759 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
760 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
761
762 pa_assert(u);
763 pa_assert(u->mixer_handle);
764
765 if (mask == SND_CTL_EVENT_MASK_REMOVE)
766 return 0;
767
768 if (mask & SND_CTL_EVENT_MASK_VALUE) {
769 pa_sink_get_volume(u->sink, TRUE);
770 pa_sink_get_mute(u->sink, TRUE);
771 }
772
773 return 0;
774 }
775
776 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
777
778 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
779 (double) (u->hw_volume_max - u->hw_volume_min));
780 }
781
782 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
783 long alsa_vol;
784
785 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
786 / PA_VOLUME_NORM) + u->hw_volume_min;
787
788 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
789 }
790
791 static int sink_get_volume_cb(pa_sink *s) {
792 struct userdata *u = s->userdata;
793 int err;
794 unsigned i;
795 pa_cvolume r;
796 char t[PA_CVOLUME_SNPRINT_MAX];
797
798 pa_assert(u);
799 pa_assert(u->mixer_elem);
800
801 if (u->mixer_seperate_channels) {
802
803 r.channels = s->sample_spec.channels;
804
805 for (i = 0; i < s->sample_spec.channels; i++) {
806 long alsa_vol;
807
808 if (u->hw_dB_supported) {
809
810 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
811 goto fail;
812
813 #ifdef HAVE_VALGRIND_MEMCHECK_H
814 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
815 #endif
816
817 r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
818 } else {
819
820 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
821 goto fail;
822
823 r.values[i] = from_alsa_volume(u, alsa_vol);
824 }
825 }
826
827 } else {
828 long alsa_vol;
829
830 if (u->hw_dB_supported) {
831
832 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
833 goto fail;
834
835 #ifdef HAVE_VALGRIND_MEMCHECK_H
836 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
837 #endif
838
839 pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));
840
841 } else {
842
843 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
844 goto fail;
845
846 pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
847 }
848 }
849
850 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
851
852 if (!pa_cvolume_equal(&u->hardware_volume, &r)) {
853
854 u->hardware_volume = s->volume = r;
855
856 if (u->hw_dB_supported) {
857 pa_cvolume reset;
858
859 /* Hmm, so the hardware volume changed, let's reset our software volume */
860
861 pa_cvolume_reset(&reset, s->sample_spec.channels);
862 pa_sink_set_soft_volume(s, &reset);
863 }
864 }
865
866 return 0;
867
868 fail:
869 pa_log_error("Unable to read volume: %s", snd_strerror(err));
870
871 return -1;
872 }
873
874 static int sink_set_volume_cb(pa_sink *s) {
875 struct userdata *u = s->userdata;
876 int err;
877 unsigned i;
878 pa_cvolume r;
879
880 pa_assert(u);
881 pa_assert(u->mixer_elem);
882
883 if (u->mixer_seperate_channels) {
884
885 r.channels = s->sample_spec.channels;
886
887 for (i = 0; i < s->sample_spec.channels; i++) {
888 long alsa_vol;
889 pa_volume_t vol;
890
891 vol = s->volume.values[i];
892
893 if (u->hw_dB_supported) {
894
895 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
896 alsa_vol += u->hw_dB_max;
897 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
898
899 if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
900 goto fail;
901
902 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
903 goto fail;
904
905 r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
906
907 } else {
908 alsa_vol = to_alsa_volume(u, vol);
909
910 if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
911 goto fail;
912
913 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
914 goto fail;
915
916 r.values[i] = from_alsa_volume(u, alsa_vol);
917 }
918 }
919
920 } else {
921 pa_volume_t vol;
922 long alsa_vol;
923
924 vol = pa_cvolume_max(&s->volume);
925
926 if (u->hw_dB_supported) {
927 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
928 alsa_vol += u->hw_dB_max;
929 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
930
931 if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
932 goto fail;
933
934 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
935 goto fail;
936
937 pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));
938
939 } else {
940 alsa_vol = to_alsa_volume(u, vol);
941
942 if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
943 goto fail;
944
945 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
946 goto fail;
947
948 pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
949 }
950 }
951
952 u->hardware_volume = r;
953
954 if (u->hw_dB_supported) {
955 char t[PA_CVOLUME_SNPRINT_MAX];
956
957 /* Match exactly what the user requested by adjusting the software volume */
958
959 pa_sw_cvolume_divide(&r, &s->volume, &r);
960 pa_sink_set_soft_volume(s, &r);
961
962 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
963 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
964 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
965
966 } else
967
968 /* We can't match exactly what the user requested, hence let's
969 * at least tell the user about it */
970
971 s->volume = r;
972
973 return 0;
974
975 fail:
976 pa_log_error("Unable to set volume: %s", snd_strerror(err));
977
978 return -1;
979 }
980
981 static int sink_get_mute_cb(pa_sink *s) {
982 struct userdata *u = s->userdata;
983 int err, sw;
984
985 pa_assert(u);
986 pa_assert(u->mixer_elem);
987
988 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
989 pa_log_error("Unable to get switch: %s", snd_strerror(err));
990 return -1;
991 }
992
993 s->muted = !sw;
994
995 return 0;
996 }
997
998 static int sink_set_mute_cb(pa_sink *s) {
999 struct userdata *u = s->userdata;
1000 int err;
1001
1002 pa_assert(u);
1003 pa_assert(u->mixer_elem);
1004
1005 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1006 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1007 return -1;
1008 }
1009
1010 return 0;
1011 }
1012
1013 static void sink_update_requested_latency_cb(pa_sink *s) {
1014 struct userdata *u = s->userdata;
1015 snd_pcm_sframes_t before;
1016 pa_assert(u);
1017
1018 if (!u->pcm_handle)
1019 return;
1020
1021 before = u->hwbuf_unused_frames;
1022 update_sw_params(u);
1023
1024 /* Let's check whether we now use only a smaller part of the
1025 buffer than before. If so, we need to make sure that subsequent
1026 rewinds are relative to the new maximum fill level and not to the
1027 current fill level. Thus, let's do a full rewind once, to clear
1028 things up. */
1029
1030 if (u->hwbuf_unused_frames > before) {
1031 pa_log_debug("Requesting rewind due to latency change.");
1032 pa_sink_request_rewind(s, (size_t) -1);
1033 }
1034 }
1035
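/* Handle a rewind request from the core: limit the requested number of bytes
 * to what can safely be taken back from the hardware buffer (keeping the
 * watermark untouched), ask ALSA to rewind, and report the amount actually
 * rewound back to the sink. */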
1036 static int process_rewind(struct userdata *u) {
1037 snd_pcm_sframes_t unused;
1038 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1039 pa_assert(u);
1040
1041 /* Figure out how much we shall rewind and reset the counter */
1042 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1043 u->sink->thread_info.rewind_nbytes = 0;
1044
1045 if (rewind_nbytes <= 0)
1046 goto finish;
1047
1048 pa_assert(rewind_nbytes > 0);
1049 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1050
1051 snd_pcm_hwsync(u->pcm_handle);
1052 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
1053 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
1054 return -1;
1055 }
1056
1057 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1058
1059 if (u->hwbuf_size > unused_nbytes)
1060 limit_nbytes = u->hwbuf_size - unused_nbytes;
1061 else
1062 limit_nbytes = 0;
1063
1064 if (rewind_nbytes > limit_nbytes)
1065 rewind_nbytes = limit_nbytes;
1066
1067 if (rewind_nbytes > 0) {
1068 snd_pcm_sframes_t in_frames, out_frames;
1069
1070 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1071
1072 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1073 pa_log_debug("before: %lu", (unsigned long) in_frames);
1074 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1075 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
1076 return -1;
1077 }
1078 pa_log_debug("after: %lu", (unsigned long) out_frames);
1079
1080 rewind_nbytes = (size_t) out_frames * u->frame_size;
1081
1082 if (rewind_nbytes <= 0)
1083 pa_log_info("Tried rewind, but was apparently not possible.");
1084 else {
1085 u->frame_index -= out_frames;
1086 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1087 pa_sink_process_rewind(u->sink, rewind_nbytes);
1088
1089 u->after_rewind = TRUE;
1090 return 0;
1091 }
1092 } else
1093 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1094
1095 finish:
1096
1097 pa_sink_process_rewind(u->sink, 0);
1098
1099 return 0;
1100
1101 }
1102
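/* The real-time I/O thread: alternately fills the hardware buffer via
 * mmap_write()/unix_write(), updates the timing smoother, and sleeps in
 * pa_rtpoll_run() until either the scheduling timer fires or ALSA signals
 * POLLOUT on the PCM's poll descriptors. */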
1103 static void thread_func(void *userdata) {
1104 struct userdata *u = userdata;
1105 unsigned short revents = 0;
1106
1107 pa_assert(u);
1108
1109 pa_log_debug("Thread starting up");
1110
1111 if (u->core->realtime_scheduling)
1112 pa_make_realtime(u->core->realtime_priority);
1113
1114 pa_thread_mq_install(&u->thread_mq);
1115 pa_rtpoll_install(u->rtpoll);
1116
1117 for (;;) {
1118 int ret;
1119
1120 /* pa_log_debug("loop"); */
1121
1122 /* Render some data and write it to the dsp */
1123 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1124 int work_done;
1125 pa_usec_t sleep_usec = 0;
1126
1127 if (u->sink->thread_info.rewind_requested)
1128 if (process_rewind(u) < 0)
1129 goto fail;
1130
1131 if (u->use_mmap)
1132 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
1133 else
1134 work_done = unix_write(u, &sleep_usec, revents & POLLOUT);
1135
1136 if (work_done < 0)
1137 goto fail;
1138
1139 /* pa_log_debug("work_done = %i", work_done); */
1140
1141 if (work_done) {
1142
1143 if (u->first) {
1144 pa_log_info("Starting playback.");
1145 snd_pcm_start(u->pcm_handle);
1146
1147 pa_smoother_resume(u->smoother, pa_rtclock_usec());
1148 }
1149
1150 update_smoother(u);
1151 }
1152
1153 if (u->use_tsched) {
1154 pa_usec_t cusec;
1155
1156 if (u->since_start <= u->hwbuf_size) {
1157
1158 /* USB devices on ALSA seem to hit a buffer
1159 * underrun during the first iterations much
1160 * quicker than we calculate here, probably due to
1161 * the transport latency. To accommodate that
1162 * we artificially decrease the sleep time until
1163 * we have filled the buffer at least once
1164 * completely.*/
1165
1166 /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
1167 sleep_usec /= 2;
1168 }
1169
1170 /* OK, the playback buffer is now full, let's
1171 * calculate when to wake up next */
1172 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1173
1174 /* Convert from the sound card time domain to the
1175 * system time domain */
1176 cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);
1177
1178 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1179
1180 /* We don't trust the conversion, so we wake up whatever comes first */
1181 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1182 }
1183
1184 u->first = FALSE;
1185 u->after_rewind = FALSE;
1186
1187 } else if (u->use_tsched)
1188
1189 /* OK, we're in an invalid state, let's disable our timers */
1190 pa_rtpoll_set_timer_disabled(u->rtpoll);
1191
1192 /* Hmm, nothing to do. Let's sleep */
1193 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1194 goto fail;
1195
1196 if (ret == 0)
1197 goto finish;
1198
1199 /* Tell ALSA about this and process its response */
1200 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1201 struct pollfd *pollfd;
1202 int err;
1203 unsigned n;
1204
1205 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1206
1207 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1208 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
1209 goto fail;
1210 }
1211
1212 if (revents & (POLLIN|POLLERR|POLLNVAL|POLLHUP|POLLPRI)) {
1213 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1214 goto fail;
1215
1216 u->first = TRUE;
1217 u->since_start = 0;
1218 }
1219
1220 if (revents && u->use_tsched)
1221 pa_log_debug("Wakeup from ALSA!%s%s", (revents & POLLIN) ? " INPUT" : "", (revents & POLLOUT) ? " OUTPUT" : "");
1222 } else
1223 revents = 0;
1224 }
1225
1226 fail:
1227 /* If this was not a regular exit from the loop we have to continue
1228 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1229 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1230 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1231
1232 finish:
1233 pa_log_debug("Thread shutting down");
1234 }
1235
1236 int pa__init(pa_module*m) {
1237
1238 pa_modargs *ma = NULL;
1239 struct userdata *u = NULL;
1240 const char *dev_id;
1241 pa_sample_spec ss;
1242 pa_channel_map map;
1243 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1244 snd_pcm_uframes_t period_frames, tsched_frames;
1245 size_t frame_size;
1246 snd_pcm_info_t *pcm_info = NULL;
1247 int err;
1248 const char *name;
1249 char *name_buf = NULL;
1250 pa_bool_t namereg_fail;
1251 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
1252 pa_usec_t usec;
1253 pa_sink_new_data data;
1254
1255 snd_pcm_info_alloca(&pcm_info);
1256
1257 pa_assert(m);
1258
1259 pa_alsa_redirect_errors_inc();
1260
1261 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1262 pa_log("Failed to parse module arguments");
1263 goto fail;
1264 }
1265
1266 ss = m->core->default_sample_spec;
1267 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1268 pa_log("Failed to parse sample specification and channel map");
1269 goto fail;
1270 }
1271
1272 frame_size = pa_frame_size(&ss);
1273
1274 nfrags = m->core->default_n_fragments;
1275 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1276 if (frag_size <= 0)
1277 frag_size = (uint32_t) frame_size;
1278 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1279 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1280
1281 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1282 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1283 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1284 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1285 pa_log("Failed to parse buffer metrics");
1286 goto fail;
1287 }
1288
1289 hwbuf_size = frag_size * nfrags;
1290 period_frames = frag_size/frame_size;
1291 tsched_frames = tsched_size/frame_size;
1292
1293 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1294 pa_log("Failed to parse mmap argument.");
1295 goto fail;
1296 }
1297
1298 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1299 pa_log("Failed to parse tsched argument.");
1300 goto fail;
1301 }
1302
1303 if (use_tsched && !pa_rtclock_hrtimer()) {
1304 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1305 use_tsched = FALSE;
1306 }
1307
1308 u = pa_xnew0(struct userdata, 1);
1309 u->core = m->core;
1310 u->module = m;
1311 m->userdata = u;
1312 u->use_mmap = use_mmap;
1313 u->use_tsched = use_tsched;
1314 u->first = TRUE;
1315 u->since_start = 0;
1316 u->after_rewind = FALSE;
1317 u->rtpoll = pa_rtpoll_new();
1318 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1319 u->alsa_rtpoll_item = NULL;
1320
1321 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1322 usec = pa_rtclock_usec();
1323 pa_smoother_set_time_offset(u->smoother, usec);
1324 pa_smoother_pause(u->smoother, usec);
1325
1326 snd_config_update_free_global();
1327
1328 b = use_mmap;
1329 d = use_tsched;
1330
1331 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1332
1333 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1334 dev_id,
1335 &u->device_name,
1336 &ss, &map,
1337 SND_PCM_STREAM_PLAYBACK,
1338 &nfrags, &period_frames, tsched_frames,
1339 &b, &d)))
1340
1341 goto fail;
1342
1343 } else {
1344
1345 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1346 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1347 &u->device_name,
1348 &ss, &map,
1349 SND_PCM_STREAM_PLAYBACK,
1350 &nfrags, &period_frames, tsched_frames,
1351 &b, &d)))
1352 goto fail;
1353
1354 }
1355
1356 pa_assert(u->device_name);
1357 pa_log_info("Successfully opened device %s.", u->device_name);
1358
1359 if (use_mmap && !b) {
1360 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1361 u->use_mmap = use_mmap = FALSE;
1362 }
1363
1364 if (use_tsched && (!b || !d)) {
1365 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1366 u->use_tsched = use_tsched = FALSE;
1367 }
1368
1369 if (u->use_mmap)
1370 pa_log_info("Successfully enabled mmap() mode.");
1371
1372 if (u->use_tsched)
1373 pa_log_info("Successfully enabled timer-based scheduling mode.");
1374
1375 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1376 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1377 goto fail;
1378 }
1379
1380 /* ALSA might tweak the sample spec, so recalculate the frame size */
1381 frame_size = pa_frame_size(&ss);
1382
1383 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1384 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1385 else {
1386 pa_bool_t found = FALSE;
1387
1388 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1389 found = TRUE;
1390 else {
1391 snd_pcm_info_t *info;
1392
1393 snd_pcm_info_alloca(&info);
1394
1395 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1396 char *md;
1397 int card;
1398
1399 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1400
1401 md = pa_sprintf_malloc("hw:%i", card);
1402
1403 if (strcmp(u->device_name, md))
1404 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1405 found = TRUE;
1406 pa_xfree(md);
1407 }
1408 }
1409 }
1410
1411 if (found)
1412 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
1413 found = FALSE;
1414
1415 if (!found) {
1416 snd_mixer_close(u->mixer_handle);
1417 u->mixer_handle = NULL;
1418 }
1419 }
1420
1421 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1422 namereg_fail = TRUE;
1423 else {
1424 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1425 namereg_fail = FALSE;
1426 }
1427
1428 pa_sink_new_data_init(&data);
1429 data.driver = __FILE__;
1430 data.module = m;
1431 pa_sink_new_data_set_name(&data, name);
1432 data.namereg_fail = namereg_fail;
1433 pa_sink_new_data_set_sample_spec(&data, &ss);
1434 pa_sink_new_data_set_channel_map(&data, &map);
1435
1436 pa_alsa_init_proplist(data.proplist, pcm_info);
1437 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1438 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1439 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1440 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1441
1442 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1443 pa_sink_new_data_done(&data);
1444 pa_xfree(name_buf);
1445
1446 if (!u->sink) {
1447 pa_log("Failed to create sink object");
1448 goto fail;
1449 }
1450
1451 u->sink->parent.process_msg = sink_process_msg;
1452 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1453 u->sink->userdata = u;
1454
1455 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1456 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1457
1458 u->frame_size = frame_size;
1459 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1460 u->nfragments = nfrags;
1461 u->hwbuf_size = u->fragment_size * nfrags;
1462 u->hwbuf_unused_frames = 0;
1463 u->tsched_watermark = tsched_watermark;
1464 u->frame_index = 0;
1465 u->hw_dB_supported = FALSE;
1466 u->hw_dB_min = u->hw_dB_max = 0;
1467 u->hw_volume_min = u->hw_volume_max = 0;
1468 u->mixer_seperate_channels = FALSE;
1469 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1470
1471 if (use_tsched)
1472 fix_tsched_watermark(u);
1473
1474 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1475 u->sink->thread_info.max_request = u->hwbuf_size;
1476
1477 pa_sink_set_latency_range(u->sink,
1478 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1479 pa_bytes_to_usec(u->hwbuf_size, &ss));
1480
1481 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1482 nfrags, (long unsigned) u->fragment_size,
1483 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1484
1485 if (use_tsched)
1486 pa_log_info("Time scheduling watermark is %0.2fms",
1487 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1488
1489 if (update_sw_params(u) < 0)
1490 goto fail;
1491
1492 pa_memchunk_reset(&u->memchunk);
1493
1494 if (u->mixer_handle) {
1495 pa_assert(u->mixer_elem);
1496
1497 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1498 pa_bool_t suitable = FALSE;
1499
1500 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
1501 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1502 else if (u->hw_volume_min >= u->hw_volume_max)
1503 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
1504 else {
1505 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1506 suitable = TRUE;
1507 }
1508
1509 if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1510 pa_log_info("Mixer doesn't support dB information.");
1511 else {
1512 #ifdef HAVE_VALGRIND_MEMCHECK_H
1513 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1514 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1515 #endif
1516
1517 if (u->hw_dB_min >= u->hw_dB_max)
1518 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1519 else {
1520 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1521 u->hw_dB_supported = TRUE;
1522 }
1523 }
1524
1525 if (suitable &&
1526 !u->hw_dB_supported &&
1527 u->hw_volume_max - u->hw_volume_min < 3) {
1528
1529 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1530 suitable = FALSE;
1531 }
1532
1533 if (suitable) {
1534 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1535
1536 u->sink->get_volume = sink_get_volume_cb;
1537 u->sink->set_volume = sink_set_volume_cb;
1538 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1539 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1540
1541 } else
1542 pa_log_info("Using software volume control.");
1543 }
1544
1545 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1546 u->sink->get_mute = sink_get_mute_cb;
1547 u->sink->set_mute = sink_set_mute_cb;
1548 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1549 } else
1550 pa_log_info("Using software mute control.");
1551
1552 u->mixer_fdl = pa_alsa_fdlist_new();
1553
1554 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1555 pa_log("Failed to initialize file descriptor monitoring");
1556 goto fail;
1557 }
1558
1559 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1560 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1561 } else
1562 u->mixer_fdl = NULL;
1563
1564 pa_alsa_dump(u->pcm_handle);
1565
1566 if (!(u->thread = pa_thread_new(thread_func, u))) {
1567 pa_log("Failed to create thread.");
1568 goto fail;
1569 }
1570
1571 /* Get initial mixer settings */
1572 if (data.volume_is_set) {
1573 if (u->sink->set_volume)
1574 u->sink->set_volume(u->sink);
1575 } else {
1576 if (u->sink->get_volume)
1577 u->sink->get_volume(u->sink);
1578 }
1579
1580 if (data.muted_is_set) {
1581 if (u->sink->set_mute)
1582 u->sink->set_mute(u->sink);
1583 } else {
1584 if (u->sink->get_mute)
1585 u->sink->get_mute(u->sink);
1586 }
1587
1588 pa_sink_put(u->sink);
1589
1590 pa_modargs_free(ma);
1591
1592 return 0;
1593
1594 fail:
1595
1596 if (ma)
1597 pa_modargs_free(ma);
1598
1599 pa__done(m);
1600
1601 return -1;
1602 }
1603
1604 void pa__done(pa_module*m) {
1605 struct userdata *u;
1606
1607 pa_assert(m);
1608
1609 if (!(u = m->userdata)) {
1610 pa_alsa_redirect_errors_dec();
1611 return;
1612 }
1613
1614 if (u->sink)
1615 pa_sink_unlink(u->sink);
1616
1617 if (u->thread) {
1618 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1619 pa_thread_free(u->thread);
1620 }
1621
1622 pa_thread_mq_done(&u->thread_mq);
1623
1624 if (u->sink)
1625 pa_sink_unref(u->sink);
1626
1627 if (u->memchunk.memblock)
1628 pa_memblock_unref(u->memchunk.memblock);
1629
1630 if (u->alsa_rtpoll_item)
1631 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1632
1633 if (u->rtpoll)
1634 pa_rtpoll_free(u->rtpoll);
1635
1636 if (u->mixer_fdl)
1637 pa_alsa_fdlist_free(u->mixer_fdl);
1638
1639 if (u->mixer_handle)
1640 snd_mixer_close(u->mixer_handle);
1641
1642 if (u->pcm_handle) {
1643 snd_pcm_drop(u->pcm_handle);
1644 snd_pcm_close(u->pcm_handle);
1645 }
1646
1647 if (u->smoother)
1648 pa_smoother_free(u->smoother);
1649
1650 pa_xfree(u->device_name);
1651 pa_xfree(u);
1652
1653 snd_config_update_free_global();
1654
1655 pa_alsa_redirect_errors_dec();
1656 }