/* pulseaudio: src/modules/alsa/module-alsa-sink.c
 * (at commit: "Split up pa_alsa_init_proplist into two separate functions
 * for the card and snd_pcm_t") */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
54
55 #include "alsa-util.h"
56 #include "module-alsa-sink-symdef.h"
57
/* Module metadata: author, description, version and the usage string
 * shown by e.g. pactl. */
PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("ALSA Sink");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "device=<ALSA device> "
        "device_id=<ALSA card index> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map> "
        "fragments=<number of fragments> "
        "fragment_size=<fragment size> "
        "mmap=<enable memory mapping?> "
        "tsched=<enable system timer based scheduling mode?> "
        "tsched_buffer_size=<buffer size when using timer based scheduling> "
        "tsched_buffer_watermark=<lower fill watermark>");

/* Module arguments accepted by pa__init(); must match PA_MODULE_USAGE above. */
static const char* const valid_modargs[] = {
    "sink_name",
    "device",
    "device_id",
    "format",
    "rate",
    "channels",
    "channel_map",
    "fragments",
    "fragment_size",
    "mmap",
    "tsched",
    "tsched_buffer_size",
    "tsched_buffer_watermark",
    NULL
};

#define DEFAULT_DEVICE "default"
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
#define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC)                 /* 3ms */
#define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC)                /* 3ms */
99
/* Per-instance state of this module, shared between the main thread
 * and the I/O thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* I/O thread plumbing */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* NULL while the device is suspended */

    /* Mixer state for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* dB*100 range; valid iff hw_dB_supported */
    pa_bool_t hw_dB_supported;
    pa_bool_t mixer_seperate_channels;  /* (sic) mixer supports per-channel volume */
    pa_cvolume hardware_volume;         /* last volume read from/written to the hw */

    /* Sizes below are in bytes unless stated otherwise */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;               /* partially-written chunk for unix_write() */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;

    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* Maps PA channel index to the ALSA mixer channel id */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Smoother translating between sound card and system time domains */
    pa_smoother *smoother;
    int64_t frame_index;                /* frames written since stream start */
    uint64_t since_start;               /* bytes written since last (re)start */

    snd_pcm_sframes_t hwbuf_unused_frames;  /* tail of the hw buffer we keep empty */
};
140
141 static void fix_tsched_watermark(struct userdata *u) {
142 size_t max_use;
143 size_t min_sleep, min_wakeup;
144 pa_assert(u);
145
146 max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;
147
148 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
149 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
150
151 if (min_sleep > max_use/2)
152 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
153 if (min_sleep < u->frame_size)
154 min_sleep = u->frame_size;
155
156 if (min_wakeup > max_use/2)
157 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
158 if (min_wakeup < u->frame_size)
159 min_wakeup = u->frame_size;
160
161 if (u->tsched_watermark > max_use-min_sleep)
162 u->tsched_watermark = max_use-min_sleep;
163
164 if (u->tsched_watermark < min_wakeup)
165 u->tsched_watermark = min_wakeup;
166 }
167
168 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
169 pa_usec_t usec, wm;
170
171 pa_assert(sleep_usec);
172 pa_assert(process_usec);
173
174 pa_assert(u);
175
176 usec = pa_sink_get_requested_latency_within_thread(u->sink);
177
178 if (usec == (pa_usec_t) -1)
179 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
180
181 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
182
183 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
184
185 if (usec >= wm) {
186 *sleep_usec = usec - wm;
187 *process_usec = wm;
188 } else
189 *process_usec = *sleep_usec = usec / 2;
190
191 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
192 }
193
194 static int try_recover(struct userdata *u, const char *call, int err) {
195 pa_assert(u);
196 pa_assert(call);
197 pa_assert(err < 0);
198
199 pa_log_debug("%s: %s", call, snd_strerror(err));
200
201 pa_assert(err != -EAGAIN);
202
203 if (err == -EPIPE)
204 pa_log_debug("%s: Buffer underrun!", call);
205
206 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
207 u->first = TRUE;
208 u->since_start = 0;
209 return 0;
210 }
211
212 pa_log("%s: %s", call, snd_strerror(err));
213 return -1;
214 }
215
216 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
217 size_t left_to_play;
218
219 if ((size_t) n*u->frame_size < u->hwbuf_size)
220 left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
221 else
222 left_to_play = 0;
223
224 if (left_to_play > 0) {
225 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
226 } else if (!u->first && !u->after_rewind) {
227 pa_log_info("Underrun!");
228
229 if (u->use_tsched) {
230 size_t old_watermark = u->tsched_watermark;
231
232 u->tsched_watermark *= 2;
233 fix_tsched_watermark(u);
234
235 if (old_watermark != u->tsched_watermark)
236 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
237 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
238 }
239 }
240
241 return left_to_play;
242 }
243
/* Fill the hardware buffer using the mmap transfer interface.
 * Returns >0 if data was written, 0 if nothing needed doing, <0 on
 * unrecoverable error. On success *sleep_usec is set to how long we
 * may sleep until the next refill. 'polled' indicates that we arrived
 * here because poll() signalled POLLOUT. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        /* NOTE: the braceless if below guards the whole following
         * if-statement, i.e. the early break only applies in tsched mode. */
        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {

            if (polled)
                pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                       "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                       "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");

            break;
        }

        /* Leave the reserved tail of the buffer alone */
        n -= u->hwbuf_unused_frames;

        polled = FALSE;

        /* pa_log_debug("Filling up"); */

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
            snd_pcm_sframes_t sframes;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Render directly into the mmap'ed DMA area */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            u->frame_index += (int64_t) frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= (snd_pcm_sframes_t) frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
369
/* Fill the hardware buffer using the classic read/write transfer
 * interface (snd_pcm_writei). Mirrors mmap_write(): returns >0 if data
 * was written, 0 if nothing needed doing, <0 on unrecoverable error;
 * sets *sleep_usec on success. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        /* NOTE: the braceless if below guards the whole following
         * if-statement, i.e. the early break only applies in tsched mode. */
        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {

            if (polled)
                pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                       "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                       "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");

            break;
        }

        /* Leave the reserved tail of the buffer alone */
        n -= u->hwbuf_unused_frames;

        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Render a fresh chunk only when the previous one is fully consumed */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > n)
                frames = n;

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance past what the device accepted; keep the rest for next time */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += (size_t) frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= n)
                break;

            n -= frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
478
479 static void update_smoother(struct userdata *u) {
480 snd_pcm_sframes_t delay = 0;
481 int64_t frames;
482 int err;
483 pa_usec_t now1, now2;
484 /* struct timeval timestamp; */
485 snd_pcm_status_t *status;
486
487 snd_pcm_status_alloca(&status);
488
489 pa_assert(u);
490 pa_assert(u->pcm_handle);
491
492 /* Let's update the time smoother */
493
494 snd_pcm_hwsync(u->pcm_handle);
495 snd_pcm_avail_update(u->pcm_handle);
496
497 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
498 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
499 /* return; */
500 /* } */
501
502 /* delay = snd_pcm_status_get_delay(status); */
503
504 if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
505 pa_log("Failed to query DSP status data: %s", snd_strerror(err));
506 return;
507 }
508
509 frames = u->frame_index - delay;
510
511 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
512
513 /* snd_pcm_status_get_tstamp(status, &timestamp); */
514 /* pa_rtclock_from_wallclock(&timestamp); */
515 /* now1 = pa_timeval_load(&timestamp); */
516
517 now1 = pa_rtclock_usec();
518 now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
519 pa_smoother_put(u->smoother, now1, now2);
520 }
521
522 static pa_usec_t sink_get_latency(struct userdata *u) {
523 pa_usec_t r = 0;
524 int64_t delay;
525 pa_usec_t now1, now2;
526
527 pa_assert(u);
528
529 now1 = pa_rtclock_usec();
530 now2 = pa_smoother_get(u->smoother, now1);
531
532 delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
533
534 if (delay > 0)
535 r = (pa_usec_t) delay;
536
537 if (u->memchunk.memblock)
538 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
539
540 return r;
541 }
542
543 static int build_pollfd(struct userdata *u) {
544 pa_assert(u);
545 pa_assert(u->pcm_handle);
546
547 if (u->alsa_rtpoll_item)
548 pa_rtpoll_item_free(u->alsa_rtpoll_item);
549
550 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
551 return -1;
552
553 return 0;
554 }
555
/* Suspend the sink: pause the clock smoother, close the PCM device and
 * tear down its poll item. Always returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Freeze the smoother at the current time so latency reporting
     * stays consistent across the suspend */
    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
576
/* Recompute hwbuf_unused_frames and avail_min from the currently
 * requested latency and push the resulting software parameters to
 * ALSA. Returns 0 on success, negative ALSA error on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused_frames = (snd_pcm_sframes_t)
                (PA_LIKELY(b < u->hwbuf_size) ?
                 ((u->hwbuf_size - b) / u->frame_size) : 0);
        }

        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, only wake via poll() once the sleep period is over */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    /* Clients never need to provide more than the used buffer part at once */
    pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);

    return 0;
}
632
/* Resume a suspended sink: reopen the PCM device and verify that we can
 * restore exactly the hw parameters (access mode, sample spec, fragment
 * layout) we had before suspending. Returns 0 on success, -1 on failure
 * (in which case the PCM handle is closed again). */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Make sure we pick up any alsa-lib configuration changes */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* Request the same configuration we had before the suspend */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Start from scratch, as if the stream had just been created */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
706
/* Message handler for the sink object; runs in the I/O thread. Handles
 * latency queries and state transitions (suspend/resume), then defers
 * to the generic sink handler. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no PCM handle; report zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ; /* nothing to do for these states */
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
760
761 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
762 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
763
764 pa_assert(u);
765 pa_assert(u->mixer_handle);
766
767 if (mask == SND_CTL_EVENT_MASK_REMOVE)
768 return 0;
769
770 if (mask & SND_CTL_EVENT_MASK_VALUE) {
771 pa_sink_get_volume(u->sink, TRUE);
772 pa_sink_get_mute(u->sink, TRUE);
773 }
774
775 return 0;
776 }
777
778 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
779
780 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
781 (double) (u->hw_volume_max - u->hw_volume_min));
782 }
783
784 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
785 long alsa_vol;
786
787 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
788 / PA_VOLUME_NORM) + u->hw_volume_min;
789
790 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
791 }
792
/* Sink callback: read the current hardware volume from the mixer into
 * s->volume. Uses the dB API when supported, raw volume otherwise;
 * per-channel when the mixer allows it, mono/joined otherwise.
 * Returns 0 on success, -1 on mixer error. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA reports dB*100, relative to hw_dB_max == 0dB attenuation */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        /* Joined channels: one value applies to all of them */
        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        u->hardware_volume = s->volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */

            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
875
/* Sink callback: write s->volume to the hardware mixer, then read back
 * what the hardware actually accepted. With dB support the residual is
 * compensated in software (soft volume); otherwise s->volume is updated
 * to reflect what the hardware could do. Returns 0 on success, -1 on
 * mixer error. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to dB*100, clamp to the hardware's dB range */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back to learn what the hardware actually set */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Joined channels: drive them all from the loudest channel */
        vol = pa_cvolume_max(&s->volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */

        pa_sw_cvolume_divide(&r, &s->volume, &r);
        pa_sink_set_soft_volume(s, &r);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->volume = r;

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
982
983 static int sink_get_mute_cb(pa_sink *s) {
984 struct userdata *u = s->userdata;
985 int err, sw;
986
987 pa_assert(u);
988 pa_assert(u->mixer_elem);
989
990 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
991 pa_log_error("Unable to get switch: %s", snd_strerror(err));
992 return -1;
993 }
994
995 s->muted = !sw;
996
997 return 0;
998 }
999
1000 static int sink_set_mute_cb(pa_sink *s) {
1001 struct userdata *u = s->userdata;
1002 int err;
1003
1004 pa_assert(u);
1005 pa_assert(u->mixer_elem);
1006
1007 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
1008 pa_log_error("Unable to set switch: %s", snd_strerror(err));
1009 return -1;
1010 }
1011
1012 return 0;
1013 }
1014
/* Sink callback: the requested latency changed, so recompute the
 * software parameters and, if the usable buffer shrank, force a full
 * rewind so the fill level matches the new maximum. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    snd_pcm_sframes_t before;
    pa_assert(u);

    /* Nothing to do while suspended */
    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused_frames;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
       buffer than before. If so, we need to make sure that subsequent
       rewinds are relative to the new maximum fill level and not to the
       current fill level. Thus, let's do a full rewind once, to clear
       things up. */

    if (u->hwbuf_unused_frames > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}
1037
/* Execute a rewind requested by the sink: roll the hardware write
 * pointer back by as much as possible (bounded by the watermark and the
 * unplayed data) and tell the sink core how much was actually rewound.
 * Returns 0 on success, -1 on ALSA error. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    /* NOTE: rewind_nbytes is size_t, so "<= 0" is effectively "== 0" */
    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    /* Keep the watermark plus already-free space out of the rewindable area */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* The device may have rewound less than we asked for */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    /* Report a zero-byte rewind so the sink core can finish the request */
    pa_sink_process_rewind(u->sink, 0);

    return 0;

}
1104
/* The real-time I/O thread: repeatedly renders audio data into the ALSA
 * device (via mmap or plain write), programs wakeup timers when
 * timer-based scheduling is in use, and reacts to poll events from the
 * device, until it is told to shut down via the thread message queue. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate this thread to RT priority if the daemon is configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Process a pending rewind request before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick off the PCM and
                 * resume the clock smoother that was paused until now */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* pa_rtpoll_run() returning 0 means a regular shutdown was requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Error-ish events: try to recover the PCM and restart playback
             * from scratch on the next iteration */
            if (revents & (POLLIN|POLLERR|POLLNVAL|POLLHUP|POLLPRI)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            if (revents && u->use_tsched)
                pa_log_debug("Wakeup from ALSA!%s%s", (revents & POLLIN) ? " INPUT" : "", (revents & POLLOUT) ? " OUTPUT" : "");
        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1237
1238 int pa__init(pa_module*m) {
1239
1240 pa_modargs *ma = NULL;
1241 struct userdata *u = NULL;
1242 const char *dev_id;
1243 pa_sample_spec ss;
1244 pa_channel_map map;
1245 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1246 snd_pcm_uframes_t period_frames, tsched_frames;
1247 size_t frame_size;
1248 snd_pcm_info_t *pcm_info = NULL;
1249 int err;
1250 const char *name;
1251 char *name_buf = NULL;
1252 pa_bool_t namereg_fail;
1253 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
1254 pa_usec_t usec;
1255 pa_sink_new_data data;
1256 const char *profile_description = NULL, *profile_name = NULL;
1257
1258 snd_pcm_info_alloca(&pcm_info);
1259
1260 pa_assert(m);
1261
1262 pa_alsa_redirect_errors_inc();
1263
1264 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1265 pa_log("Failed to parse module arguments");
1266 goto fail;
1267 }
1268
1269 ss = m->core->default_sample_spec;
1270 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1271 pa_log("Failed to parse sample specification and channel map");
1272 goto fail;
1273 }
1274
1275 frame_size = pa_frame_size(&ss);
1276
1277 nfrags = m->core->default_n_fragments;
1278 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1279 if (frag_size <= 0)
1280 frag_size = (uint32_t) frame_size;
1281 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1282 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1283
1284 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1285 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1286 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1287 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1288 pa_log("Failed to parse buffer metrics");
1289 goto fail;
1290 }
1291
1292 hwbuf_size = frag_size * nfrags;
1293 period_frames = frag_size/frame_size;
1294 tsched_frames = tsched_size/frame_size;
1295
1296 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1297 pa_log("Failed to parse mmap argument.");
1298 goto fail;
1299 }
1300
1301 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1302 pa_log("Failed to parse tsched argument.");
1303 goto fail;
1304 }
1305
1306 if (use_tsched && !pa_rtclock_hrtimer()) {
1307 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1308 use_tsched = FALSE;
1309 }
1310
1311 m->userdata = u = pa_xnew0(struct userdata, 1);
1312 u->core = m->core;
1313 u->module = m;
1314 u->use_mmap = use_mmap;
1315 u->use_tsched = use_tsched;
1316 u->first = TRUE;
1317 u->since_start = 0;
1318 u->after_rewind = FALSE;
1319 u->rtpoll = pa_rtpoll_new();
1320 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1321 u->alsa_rtpoll_item = NULL;
1322
1323 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1324 usec = pa_rtclock_usec();
1325 pa_smoother_set_time_offset(u->smoother, usec);
1326 pa_smoother_pause(u->smoother, usec);
1327
1328 snd_config_update_free_global();
1329
1330 b = use_mmap;
1331 d = use_tsched;
1332
1333 if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1334
1335 if (!(u->pcm_handle = pa_alsa_open_by_device_id(
1336 dev_id,
1337 &u->device_name,
1338 &ss, &map,
1339 SND_PCM_STREAM_PLAYBACK,
1340 &nfrags, &period_frames, tsched_frames,
1341 &b, &d, &profile_description, &profile_name)))
1342
1343 goto fail;
1344
1345 } else {
1346
1347 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1348 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1349 &u->device_name,
1350 &ss, &map,
1351 SND_PCM_STREAM_PLAYBACK,
1352 &nfrags, &period_frames, tsched_frames,
1353 &b, &d, FALSE)))
1354 goto fail;
1355
1356 }
1357
1358 pa_assert(u->device_name);
1359 pa_log_info("Successfully opened device %s.", u->device_name);
1360
1361 if (profile_description)
1362 pa_log_info("Selected configuration '%s' (%s).", profile_description, profile_name);
1363
1364 if (use_mmap && !b) {
1365 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1366 u->use_mmap = use_mmap = FALSE;
1367 }
1368
1369 if (use_tsched && (!b || !d)) {
1370 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1371 u->use_tsched = use_tsched = FALSE;
1372 }
1373
1374 if (u->use_mmap)
1375 pa_log_info("Successfully enabled mmap() mode.");
1376
1377 if (u->use_tsched)
1378 pa_log_info("Successfully enabled timer-based scheduling mode.");
1379
1380 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1381 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1382 goto fail;
1383 }
1384
1385 /* ALSA might tweak the sample spec, so recalculate the frame size */
1386 frame_size = pa_frame_size(&ss);
1387
1388 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1389 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1390 else {
1391 pa_bool_t found = FALSE;
1392
1393 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1394 found = TRUE;
1395 else {
1396 snd_pcm_info_t *info;
1397
1398 snd_pcm_info_alloca(&info);
1399
1400 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1401 char *md;
1402 int card;
1403
1404 if ((card = snd_pcm_info_get_card(info)) >= 0) {
1405
1406 md = pa_sprintf_malloc("hw:%i", card);
1407
1408 if (strcmp(u->device_name, md))
1409 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1410 found = TRUE;
1411 pa_xfree(md);
1412 }
1413 }
1414 }
1415
1416 if (found)
1417 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM", TRUE)))
1418 found = FALSE;
1419
1420 if (!found) {
1421 snd_mixer_close(u->mixer_handle);
1422 u->mixer_handle = NULL;
1423 }
1424 }
1425
1426 if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
1427 namereg_fail = TRUE;
1428 else {
1429 name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
1430 namereg_fail = FALSE;
1431 }
1432
1433 pa_sink_new_data_init(&data);
1434 data.driver = __FILE__;
1435 data.module = m;
1436 pa_sink_new_data_set_name(&data, name);
1437 data.namereg_fail = namereg_fail;
1438 pa_sink_new_data_set_sample_spec(&data, &ss);
1439 pa_sink_new_data_set_channel_map(&data, &map);
1440
1441 pa_alsa_init_proplist_pcm(data.proplist, pcm_info);
1442 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1443 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1444 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1445 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1446
1447 if (profile_name)
1448 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile_name);
1449 if (profile_description)
1450 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile_description);
1451
1452 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1453 pa_sink_new_data_done(&data);
1454 pa_xfree(name_buf);
1455
1456 if (!u->sink) {
1457 pa_log("Failed to create sink object");
1458 goto fail;
1459 }
1460
1461 u->sink->parent.process_msg = sink_process_msg;
1462 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1463 u->sink->userdata = u;
1464
1465 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1466 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1467
1468 u->frame_size = frame_size;
1469 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1470 u->nfragments = nfrags;
1471 u->hwbuf_size = u->fragment_size * nfrags;
1472 u->hwbuf_unused_frames = 0;
1473 u->tsched_watermark = tsched_watermark;
1474 u->frame_index = 0;
1475 u->hw_dB_supported = FALSE;
1476 u->hw_dB_min = u->hw_dB_max = 0;
1477 u->hw_volume_min = u->hw_volume_max = 0;
1478 u->mixer_seperate_channels = FALSE;
1479 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1480
1481 if (use_tsched)
1482 fix_tsched_watermark(u);
1483
1484 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1485 u->sink->thread_info.max_request = u->hwbuf_size;
1486
1487 pa_sink_set_latency_range(u->sink,
1488 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1489 pa_bytes_to_usec(u->hwbuf_size, &ss));
1490
1491 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1492 nfrags, (long unsigned) u->fragment_size,
1493 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1494
1495 if (use_tsched)
1496 pa_log_info("Time scheduling watermark is %0.2fms",
1497 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1498
1499 if (update_sw_params(u) < 0)
1500 goto fail;
1501
1502 pa_memchunk_reset(&u->memchunk);
1503
1504 if (u->mixer_handle) {
1505 pa_assert(u->mixer_elem);
1506
1507 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1508 pa_bool_t suitable = FALSE;
1509
1510 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
1511 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1512 else if (u->hw_volume_min >= u->hw_volume_max)
1513 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
1514 else {
1515 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1516 suitable = TRUE;
1517 }
1518
1519 if (suitable) {
1520 if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1521 pa_log_info("Mixer doesn't support dB information.");
1522 else {
1523 #ifdef HAVE_VALGRIND_MEMCHECK_H
1524 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1525 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1526 #endif
1527
1528 if (u->hw_dB_min >= u->hw_dB_max)
1529 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1530 else {
1531 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1532 u->hw_dB_supported = TRUE;
1533
1534 if (u->hw_dB_max > 0) {
1535 u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
1536 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1537 } else
1538 pa_log_info("No particular base volume set, fixing to 0 dB");
1539 }
1540 }
1541
1542 if (!u->hw_dB_supported &&
1543 u->hw_volume_max - u->hw_volume_min < 3) {
1544
1545 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1546 suitable = FALSE;
1547 }
1548 }
1549
1550 if (suitable) {
1551 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1552
1553 u->sink->get_volume = sink_get_volume_cb;
1554 u->sink->set_volume = sink_set_volume_cb;
1555 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1556 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1557
1558 } else
1559 pa_log_info("Using software volume control.");
1560 }
1561
1562 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1563 u->sink->get_mute = sink_get_mute_cb;
1564 u->sink->set_mute = sink_set_mute_cb;
1565 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1566 } else
1567 pa_log_info("Using software mute control.");
1568
1569 u->mixer_fdl = pa_alsa_fdlist_new();
1570
1571 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1572 pa_log("Failed to initialize file descriptor monitoring");
1573 goto fail;
1574 }
1575
1576 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1577 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1578 } else
1579 u->mixer_fdl = NULL;
1580
1581 pa_alsa_dump(u->pcm_handle);
1582
1583 if (!(u->thread = pa_thread_new(thread_func, u))) {
1584 pa_log("Failed to create thread.");
1585 goto fail;
1586 }
1587
1588 /* Get initial mixer settings */
1589 if (data.volume_is_set) {
1590 if (u->sink->set_volume)
1591 u->sink->set_volume(u->sink);
1592 } else {
1593 if (u->sink->get_volume)
1594 u->sink->get_volume(u->sink);
1595 }
1596
1597 if (data.muted_is_set) {
1598 if (u->sink->set_mute)
1599 u->sink->set_mute(u->sink);
1600 } else {
1601 if (u->sink->get_mute)
1602 u->sink->get_mute(u->sink);
1603 }
1604
1605 pa_sink_put(u->sink);
1606
1607 pa_modargs_free(ma);
1608
1609 return 0;
1610
1611 fail:
1612
1613 if (ma)
1614 pa_modargs_free(ma);
1615
1616 pa__done(m);
1617
1618 return -1;
1619 }
1620
1621 int pa__get_n_used(pa_module *m) {
1622 struct userdata *u;
1623
1624 pa_assert(m);
1625 pa_assert_se(u = m->userdata);
1626
1627 return pa_sink_linked_by(u->sink);
1628 }
1629
1630 void pa__done(pa_module*m) {
1631 struct userdata *u;
1632
1633 pa_assert(m);
1634
1635 if (!(u = m->userdata))
1636 goto finish;
1637
1638 if (u->sink)
1639 pa_sink_unlink(u->sink);
1640
1641 if (u->thread) {
1642 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1643 pa_thread_free(u->thread);
1644 }
1645
1646 pa_thread_mq_done(&u->thread_mq);
1647
1648 if (u->sink)
1649 pa_sink_unref(u->sink);
1650
1651 if (u->memchunk.memblock)
1652 pa_memblock_unref(u->memchunk.memblock);
1653
1654 if (u->alsa_rtpoll_item)
1655 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1656
1657 if (u->rtpoll)
1658 pa_rtpoll_free(u->rtpoll);
1659
1660 if (u->mixer_fdl)
1661 pa_alsa_fdlist_free(u->mixer_fdl);
1662
1663 if (u->mixer_handle)
1664 snd_mixer_close(u->mixer_handle);
1665
1666 if (u->pcm_handle) {
1667 snd_pcm_drop(u->pcm_handle);
1668 snd_pcm_close(u->pcm_handle);
1669 }
1670
1671 if (u->smoother)
1672 pa_smoother_free(u->smoother);
1673
1674 pa_xfree(u->device_name);
1675 pa_xfree(u);
1676
1677 finish:
1678
1679 snd_config_update_free_global();
1680 pa_alsa_redirect_errors_dec();
1681 }