]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
Merge commit 'elmarco/bt-wip'
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
54
55 #include "alsa-util.h"
56 #include "alsa-sink.h"
57
58 #define DEFAULT_DEVICE "default"
59 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
60 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
61 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
62 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
63
/* Per-instance state of the ALSA playback sink. Created by the module setup
 * code (outside this chunk) and released via userdata_free(). */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO-thread plumbing */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;          /* NULL while the device is suspended, see suspend()/unsuspend() */

    /* Hardware mixer (volume/mute) state */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* dB*100 range, valid when hw_dB_supported */
    pa_bool_t hw_dB_supported;          /* element supports the dB API */
    pa_bool_t mixer_seperate_channels;  /* (sic, "separate") per-channel vs. mono control */
    pa_cvolume hardware_volume;         /* last volume read from / written to hardware */

    /* Buffer geometry -- all byte quantities unless noted otherwise */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;               /* pending render data for unix_write() */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;     /* mmap transfer / timer-based scheduling */

    pa_bool_t first, after_rewind;      /* first: playback not started yet; after_rewind: just rewound */

    pa_rtpoll_item *alsa_rtpoll_item;   /* poll descriptors of the PCM, see build_pollfd() */

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation */
    pa_smoother *smoother;
    int64_t frame_index;                /* frames handed to ALSA so far (decremented on rewind) */
    uint64_t since_start;               /* bytes written since last (re)start, reset in try_recover() */

    snd_pcm_sframes_t hwbuf_unused_frames;  /* tail of the hw buffer deliberately left unused */
};
104
105 static void userdata_free(struct userdata *u);
106
107 static void fix_tsched_watermark(struct userdata *u) {
108 size_t max_use;
109 size_t min_sleep, min_wakeup;
110 pa_assert(u);
111
112 max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;
113
114 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
115 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
116
117 if (min_sleep > max_use/2)
118 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
119 if (min_sleep < u->frame_size)
120 min_sleep = u->frame_size;
121
122 if (min_wakeup > max_use/2)
123 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
124 if (min_wakeup < u->frame_size)
125 min_wakeup = u->frame_size;
126
127 if (u->tsched_watermark > max_use-min_sleep)
128 u->tsched_watermark = max_use-min_sleep;
129
130 if (u->tsched_watermark < min_wakeup)
131 u->tsched_watermark = min_wakeup;
132 }
133
134 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
135 pa_usec_t usec, wm;
136
137 pa_assert(sleep_usec);
138 pa_assert(process_usec);
139
140 pa_assert(u);
141
142 usec = pa_sink_get_requested_latency_within_thread(u->sink);
143
144 if (usec == (pa_usec_t) -1)
145 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
146
147 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
148
149 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
150
151 if (usec >= wm) {
152 *sleep_usec = usec - wm;
153 *process_usec = wm;
154 } else
155 *process_usec = *sleep_usec = usec / 2;
156
157 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
158 }
159
160 static int try_recover(struct userdata *u, const char *call, int err) {
161 pa_assert(u);
162 pa_assert(call);
163 pa_assert(err < 0);
164
165 pa_log_debug("%s: %s", call, snd_strerror(err));
166
167 pa_assert(err != -EAGAIN);
168
169 if (err == -EPIPE)
170 pa_log_debug("%s: Buffer underrun!", call);
171
172 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
173 u->first = TRUE;
174 u->since_start = 0;
175 return 0;
176 }
177
178 pa_log("%s: %s", call, snd_strerror(err));
179 return -1;
180 }
181
/* Given the number of writable frames 'n' reported by ALSA, return how many
 * bytes are still queued for playback in the hardware buffer. If nothing is
 * left -- and we are neither just starting up nor right after a rewind --
 * this counts as an underrun; in tsched mode the watermark is doubled so we
 * wake up earlier next time. */
static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
    size_t left_to_play;

    /* Whatever is not writable is still waiting to be played */
    if ((size_t) n*u->frame_size < u->hwbuf_size)
        left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
    else
        left_to_play = 0;

    if (left_to_play > 0) {
        /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
    } else if (!u->first && !u->after_rewind) {

        if (pa_log_ratelimit())
            pa_log_info("Underrun!");

        if (u->use_tsched) {
            size_t old_watermark = u->tsched_watermark;

            /* Back off: double the watermark (clamped by fix_tsched_watermark) */
            u->tsched_watermark *= 2;
            fix_tsched_watermark(u);

            if (old_watermark != u->tsched_watermark)
                pa_log_notice("Increasing wakeup watermark to %0.2f ms",
                              (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
        }
    }

    return left_to_play;
}
211
/* Fill the hardware buffer using the ALSA mmap transfer interface.
 *
 * Renders sink audio directly into the mapped DMA area until the buffer is
 * full (minus hwbuf_unused_frames). Returns 1 if any data was written, 0 if
 * there was nothing to do, negative on unrecoverable error. On success
 * *sleep_usec receives how long the caller may sleep before a refill is
 * needed. 'polled' is TRUE when this call was triggered by POLLOUT. */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        /* NOTE(review): the braceless body of this 'if' is the 'if
         * (!polled ...)' below the block comment -- easy to misread;
         * braces would help. */
        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {

            /* A POLLOUT wakeup with nothing writable indicates a driver bug */
            if (polled && pa_log_ratelimit())
                pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                       "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                       "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");

            break;
        }

        /* Leave the deliberately-unused tail of the buffer alone */
        n -= u->hwbuf_unused_frames;

        polled = FALSE;

        /* pa_log_debug("Filling up"); */

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
            snd_pcm_sframes_t sframes;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap area in a fixed memblock so the sink renders
             * straight into the device buffer. NOTE(review): uses
             * u->core->mempool here but u->sink->core->mempool above --
             * presumably the same pool; confirm. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            u->frame_index += (int64_t) frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= (snd_pcm_sframes_t) frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
337
/* Fill the hardware buffer using the classic snd_pcm_writei() interface --
 * the non-mmap counterpart of mmap_write(), with the same contract:
 * returns 1 if data was written, 0 if nothing to do, negative on
 * unrecoverable error; *sleep_usec receives the allowed sleep time.
 * Partially-consumed render data is kept in u->memchunk between calls. */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        /* NOTE(review): the braceless body of this 'if' is the 'if
         * (!polled ...)' below the block comment -- easy to misread. */
        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {

            /* A POLLOUT wakeup with nothing writable indicates a driver bug */
            if (polled && pa_log_ratelimit())
                pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                       "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                       "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");

            break;
        }

        /* Leave the deliberately-unused tail of the buffer alone */
        n -= u->hwbuf_unused_frames;

        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            /* Refill the staging chunk when the previous one is used up */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > n)
                frames = n;

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance past what the device accepted (may be a short write) */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += (size_t) frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= n)
                break;

            n -= frames;
        }
    }

    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
446
/* Feed a fresh (system time, playback position) sample into the time
 * smoother, so latency queries can interpolate between device reads.
 * The playback position is derived from frames handed to ALSA minus
 * what the device still has queued (snd_pcm_delay). */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    /* Sync the hw pointer before asking for the delay */
    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames actually played so far */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
489
490 static pa_usec_t sink_get_latency(struct userdata *u) {
491 pa_usec_t r = 0;
492 int64_t delay;
493 pa_usec_t now1, now2;
494
495 pa_assert(u);
496
497 now1 = pa_rtclock_usec();
498 now2 = pa_smoother_get(u->smoother, now1);
499
500 delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
501
502 if (delay > 0)
503 r = (pa_usec_t) delay;
504
505 if (u->memchunk.memblock)
506 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
507
508 return r;
509 }
510
511 static int build_pollfd(struct userdata *u) {
512 pa_assert(u);
513 pa_assert(u->pcm_handle);
514
515 if (u->alsa_rtpoll_item)
516 pa_rtpoll_item_free(u->alsa_rtpoll_item);
517
518 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
519 return -1;
520
521 return 0;
522 }
523
524 static int suspend(struct userdata *u) {
525 pa_assert(u);
526 pa_assert(u->pcm_handle);
527
528 pa_smoother_pause(u->smoother, pa_rtclock_usec());
529
530 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
531 * take awfully long with our long buffer sizes today. */
532 snd_pcm_close(u->pcm_handle);
533 u->pcm_handle = NULL;
534
535 if (u->alsa_rtpoll_item) {
536 pa_rtpoll_item_free(u->alsa_rtpoll_item);
537 u->alsa_rtpoll_item = NULL;
538 }
539
540 pa_log_info("Device suspended...");
541
542 return 0;
543 }
544
/* Recompute the software parameters (hwbuf_unused_frames, the tsched
 * watermark and ALSA's avail_min) from the currently requested sink latency
 * and push them to the device. Returns 0 on success or a negative ALSA
 * error code. Also updates the sink's max_request accordingly. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency is left unused */
            u->hwbuf_unused_frames = (snd_pcm_sframes_t)
                (PA_LIKELY(b < u->hwbuf_size) ?
                 ((u->hwbuf_size - b) / u->frame_size) : 0);
        }

        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* Raise avail_min so poll() does not wake us before the planned
         * sleep time has elapsed */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);

    return 0;
}
600
601 static int unsuspend(struct userdata *u) {
602 pa_sample_spec ss;
603 int err;
604 pa_bool_t b, d;
605 unsigned nfrags;
606 snd_pcm_uframes_t period_size;
607
608 pa_assert(u);
609 pa_assert(!u->pcm_handle);
610
611 pa_log_info("Trying resume...");
612
613 snd_config_update_free_global();
614 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
615 /*SND_PCM_NONBLOCK|*/
616 SND_PCM_NO_AUTO_RESAMPLE|
617 SND_PCM_NO_AUTO_CHANNELS|
618 SND_PCM_NO_AUTO_FORMAT)) < 0) {
619 pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
620 goto fail;
621 }
622
623 ss = u->sink->sample_spec;
624 nfrags = u->nfragments;
625 period_size = u->fragment_size / u->frame_size;
626 b = u->use_mmap;
627 d = u->use_tsched;
628
629 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
630 pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
631 goto fail;
632 }
633
634 if (b != u->use_mmap || d != u->use_tsched) {
635 pa_log_warn("Resume failed, couldn't get original access mode.");
636 goto fail;
637 }
638
639 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
640 pa_log_warn("Resume failed, couldn't restore original sample settings.");
641 goto fail;
642 }
643
644 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
645 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
646 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
647 (unsigned long) nfrags, period_size * u->frame_size);
648 goto fail;
649 }
650
651 if (update_sw_params(u) < 0)
652 goto fail;
653
654 if (build_pollfd(u) < 0)
655 goto fail;
656
657 /* FIXME: We need to reload the volume somehow */
658
659 u->first = TRUE;
660 u->since_start = 0;
661
662 pa_log_info("Resumed successfully...");
663
664 return 0;
665
666 fail:
667 if (u->pcm_handle) {
668 snd_pcm_close(u->pcm_handle);
669 u->pcm_handle = NULL;
670 }
671
672 return -1;
673 }
674
/* Message handler for the sink object. Handles latency queries and state
 * transitions (suspend/resume, initial pollfd setup); everything else falls
 * through to the generic pa_sink_process_msg(). Per the usual pa_sink
 * convention this runs in the IO thread context -- confirm against the
 * module setup code. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Report 0 latency while suspended (no PCM handle) */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up the poll item */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    /* Coming back from suspend: reopen the device */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
729
730 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
731 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
732
733 pa_assert(u);
734 pa_assert(u->mixer_handle);
735
736 if (mask == SND_CTL_EVENT_MASK_REMOVE)
737 return 0;
738
739 if (mask & SND_CTL_EVENT_MASK_VALUE) {
740 pa_sink_get_volume(u->sink, TRUE);
741 pa_sink_get_mute(u->sink, TRUE);
742 }
743
744 return 0;
745 }
746
747 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
748
749 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
750 (double) (u->hw_volume_max - u->hw_volume_min));
751 }
752
753 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
754 long alsa_vol;
755
756 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
757 / PA_VOLUME_NORM) + u->hw_volume_min;
758
759 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
760 }
761
/* pa_sink "get volume" hook: read the current hardware volume from the
 * mixer element into s->virtual_volume -- per channel when
 * mixer_seperate_channels is set, otherwise from the single mono control.
 * The dB API is used when hw_dB_supported; alsa_vol is then in 1/100 dB
 * relative to hw_dB_max. If the hardware volume changed behind our back in
 * dB mode, the software volume is reset to keep the effective volume
 * consistent. */
static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        /* Single control: read once, apply to all channels */
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        s->virtual_volume = u->hardware_volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */
            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));
}
841
/* pa_sink "set volume" hook: push s->virtual_volume out to the hardware
 * mixer. After writing, the value is read back (hardware quantizes), and in
 * dB mode the residual is compensated in software (soft_volume); without dB
 * support we simply report the quantized value back as the virtual volume. */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->virtual_volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to 1/100 dB relative to hw_dB_max, clamp to range */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back what the hardware actually applied */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Single control: drive it with the loudest channel */
        vol = pa_cvolume_max(&s->virtual_volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it.
         * NOTE(review): this assignment is the (braceless) body of the
         * 'else' above -- easy to misread; braces would help. */

        s->virtual_volume = r;

    return;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));
}
952
953 static void sink_get_mute_cb(pa_sink *s) {
954 struct userdata *u = s->userdata;
955 int err, sw;
956
957 pa_assert(u);
958 pa_assert(u->mixer_elem);
959
960 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
961 pa_log_error("Unable to get switch: %s", snd_strerror(err));
962 return;
963 }
964
965 s->muted = !sw;
966 }
967
968 static void sink_set_mute_cb(pa_sink *s) {
969 struct userdata *u = s->userdata;
970 int err;
971
972 pa_assert(u);
973 pa_assert(u->mixer_elem);
974
975 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
976 pa_log_error("Unable to set switch: %s", snd_strerror(err));
977 return;
978 }
979 }
980
/* pa_sink "update requested latency" hook: re-apply the software parameters
 * and, if the usable part of the buffer just shrank, request a full rewind
 * so future rewinds are measured against the new maximum fill level. */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    snd_pcm_sframes_t before;
    pa_assert(u);

    /* Nothing to do while the device is suspended */
    if (!u->pcm_handle)
        return;

    before = u->hwbuf_unused_frames;
    update_sw_params(u);

    /* Let's check whether we now use only a smaller part of the
    buffer than before. If so, we need to make sure that subsequent
    rewinds are relative to the new maximum fill level and not to the
    current fill level. Thus, let's do a full rewind once, to clear
    things up. */

    if (u->hwbuf_unused_frames > before) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(s, (size_t) -1);
    }
}
1003
/* Execute a rewind requested by the sink core: pull the ALSA write pointer
 * back by up to thread_info.rewind_nbytes, never touching the watermark or
 * the already-writable space. Always reports the (possibly zero) rewound
 * amount back via pa_sink_process_rewind(). Returns 0 on success, -1 on
 * ALSA failure. */
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    pa_assert(u);

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;
    u->sink->thread_info.rewind_nbytes = 0;

    /* NOTE(review): rewind_nbytes is size_t (unsigned), so '<= 0' is
     * effectively '== 0' here and below */
    if (rewind_nbytes <= 0)
        goto finish;

    pa_assert(rewind_nbytes > 0);
    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    snd_pcm_hwsync(u->pcm_handle);
    if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
        pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
        return -1;
    }

    /* Bytes we must leave alone: already-writable space plus the watermark */
    unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
            return -1;
        }
        pa_log_debug("after: %lu", (unsigned long) out_frames);

        /* ALSA may rewind fewer frames than we asked for */
        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->frame_index -= out_frames;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = TRUE;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

finish:

    /* Report a zero-length rewind so the core's bookkeeping stays in sync */
    pa_sink_process_rewind(u->sink, 0);

    return 0;

}
1070
/* Real-time I/O thread: renders audio from the sink into the ALSA
 * buffer, computes the next wakeup (timer-based scheduling) or waits
 * for POLLOUT (IRQ-based scheduling), and recovers from poll errors.
 * Runs until pa_rtpoll_run() signals a regular shutdown (returns 0). */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate to RT priority if the daemon is configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Honor a pending rewind request before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First successful write after (re)start: kick off the
                 * PCM and resume the latency smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* Regular shutdown was requested */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Error-ish events: try to recover the PCM (e.g. from an
             * underrun) and restart playback from scratch */
            if (revents & (POLLIN|POLLERR|POLLNVAL|POLLHUP|POLLPRI)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            /* In tsched mode an ALSA wakeup usually means we mispredicted
             * the fill level; log it (rate-limited) for diagnosis */
            if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!%s%s", (revents & POLLIN) ? " INPUT" : "", (revents & POLLOUT) ? " OUTPUT" : "");
        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1203
1204 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1205 const char *n;
1206 char *t;
1207
1208 pa_assert(data);
1209 pa_assert(ma);
1210 pa_assert(device_name);
1211
1212 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1213 pa_sink_new_data_set_name(data, n);
1214 data->namereg_fail = TRUE;
1215 return;
1216 }
1217
1218 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1219 data->namereg_fail = TRUE;
1220 else {
1221 n = device_id ? device_id : device_name;
1222 data->namereg_fail = FALSE;
1223 }
1224
1225 t = pa_sprintf_malloc("alsa_output.%s", n);
1226 pa_sink_new_data_set_name(data, t);
1227 pa_xfree(t);
1228 }
1229
1230 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {
1231
1232 struct userdata *u = NULL;
1233 const char *dev_id = NULL;
1234 pa_sample_spec ss;
1235 pa_channel_map map;
1236 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1237 snd_pcm_uframes_t period_frames, tsched_frames;
1238 size_t frame_size;
1239 snd_pcm_info_t *pcm_info = NULL;
1240 int err;
1241 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1242 pa_usec_t usec;
1243 pa_sink_new_data data;
1244
1245 snd_pcm_info_alloca(&pcm_info);
1246
1247 pa_assert(m);
1248 pa_assert(ma);
1249
1250 ss = m->core->default_sample_spec;
1251 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1252 pa_log("Failed to parse sample specification and channel map");
1253 goto fail;
1254 }
1255
1256 frame_size = pa_frame_size(&ss);
1257
1258 nfrags = m->core->default_n_fragments;
1259 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1260 if (frag_size <= 0)
1261 frag_size = (uint32_t) frame_size;
1262 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1263 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1264
1265 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1266 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1267 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1268 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1269 pa_log("Failed to parse buffer metrics");
1270 goto fail;
1271 }
1272
1273 hwbuf_size = frag_size * nfrags;
1274 period_frames = frag_size/frame_size;
1275 tsched_frames = tsched_size/frame_size;
1276
1277 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1278 pa_log("Failed to parse mmap argument.");
1279 goto fail;
1280 }
1281
1282 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1283 pa_log("Failed to parse tsched argument.");
1284 goto fail;
1285 }
1286
1287 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1288 pa_log("Failed to parse ignore_dB argument.");
1289 goto fail;
1290 }
1291
1292 if (use_tsched && !pa_rtclock_hrtimer()) {
1293 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1294 use_tsched = FALSE;
1295 }
1296
1297 u = pa_xnew0(struct userdata, 1);
1298 u->core = m->core;
1299 u->module = m;
1300 u->use_mmap = use_mmap;
1301 u->use_tsched = use_tsched;
1302 u->first = TRUE;
1303 u->since_start = 0;
1304 u->after_rewind = FALSE;
1305 u->rtpoll = pa_rtpoll_new();
1306 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1307 u->alsa_rtpoll_item = NULL;
1308
1309 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1310 usec = pa_rtclock_usec();
1311 pa_smoother_set_time_offset(u->smoother, usec);
1312 pa_smoother_pause(u->smoother, usec);
1313
1314 b = use_mmap;
1315 d = use_tsched;
1316
1317 if (profile) {
1318
1319 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1320 pa_log("device_id= not set");
1321 goto fail;
1322 }
1323
1324 if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
1325 dev_id,
1326 &u->device_name,
1327 &ss, &map,
1328 SND_PCM_STREAM_PLAYBACK,
1329 &nfrags, &period_frames, tsched_frames,
1330 &b, &d, profile)))
1331
1332 goto fail;
1333
1334 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1335
1336 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1337 dev_id,
1338 &u->device_name,
1339 &ss, &map,
1340 SND_PCM_STREAM_PLAYBACK,
1341 &nfrags, &period_frames, tsched_frames,
1342 &b, &d, &profile)))
1343
1344 goto fail;
1345
1346 } else {
1347
1348 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1349 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1350 &u->device_name,
1351 &ss, &map,
1352 SND_PCM_STREAM_PLAYBACK,
1353 &nfrags, &period_frames, tsched_frames,
1354 &b, &d, FALSE)))
1355 goto fail;
1356
1357 }
1358
1359 pa_assert(u->device_name);
1360 pa_log_info("Successfully opened device %s.", u->device_name);
1361
1362 if (profile)
1363 pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);
1364
1365 if (use_mmap && !b) {
1366 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1367 u->use_mmap = use_mmap = FALSE;
1368 }
1369
1370 if (use_tsched && (!b || !d)) {
1371 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1372 u->use_tsched = use_tsched = FALSE;
1373 }
1374
1375 if (u->use_mmap)
1376 pa_log_info("Successfully enabled mmap() mode.");
1377
1378 if (u->use_tsched)
1379 pa_log_info("Successfully enabled timer-based scheduling mode.");
1380
1381 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1382 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1383 goto fail;
1384 }
1385
1386 /* ALSA might tweak the sample spec, so recalculate the frame size */
1387 frame_size = pa_frame_size(&ss);
1388
1389 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1390 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1391 else {
1392 pa_bool_t found = FALSE;
1393
1394 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1395 found = TRUE;
1396 else {
1397 snd_pcm_info_t *info;
1398
1399 snd_pcm_info_alloca(&info);
1400
1401 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1402 char *md;
1403 int card_idx;
1404
1405 if ((card_idx = snd_pcm_info_get_card(info)) >= 0) {
1406
1407 md = pa_sprintf_malloc("hw:%i", card_idx);
1408
1409 if (strcmp(u->device_name, md))
1410 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1411 found = TRUE;
1412 pa_xfree(md);
1413 }
1414 }
1415 }
1416
1417 if (found)
1418 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM", TRUE)))
1419 found = FALSE;
1420
1421 if (!found) {
1422 snd_mixer_close(u->mixer_handle);
1423 u->mixer_handle = NULL;
1424 }
1425 }
1426
1427 pa_sink_new_data_init(&data);
1428 data.driver = driver;
1429 data.module = m;
1430 data.card = card;
1431 set_sink_name(&data, ma, dev_id, u->device_name);
1432 pa_sink_new_data_set_sample_spec(&data, &ss);
1433 pa_sink_new_data_set_channel_map(&data, &map);
1434
1435 pa_alsa_init_proplist_pcm(m->core, data.proplist, pcm_info);
1436 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1437 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1438 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1439 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1440
1441 if (profile) {
1442 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
1443 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
1444 }
1445
1446 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1447 pa_sink_new_data_done(&data);
1448
1449 if (!u->sink) {
1450 pa_log("Failed to create sink object");
1451 goto fail;
1452 }
1453
1454 u->sink->parent.process_msg = sink_process_msg;
1455 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1456 u->sink->userdata = u;
1457
1458 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1459 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1460
1461 u->frame_size = frame_size;
1462 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1463 u->nfragments = nfrags;
1464 u->hwbuf_size = u->fragment_size * nfrags;
1465 u->hwbuf_unused_frames = 0;
1466 u->tsched_watermark = tsched_watermark;
1467 u->frame_index = 0;
1468 u->hw_dB_supported = FALSE;
1469 u->hw_dB_min = u->hw_dB_max = 0;
1470 u->hw_volume_min = u->hw_volume_max = 0;
1471 u->mixer_seperate_channels = FALSE;
1472 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1473
1474 if (use_tsched)
1475 fix_tsched_watermark(u);
1476
1477 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1478 u->sink->thread_info.max_request = u->hwbuf_size;
1479
1480 pa_sink_set_latency_range(u->sink,
1481 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1482 pa_bytes_to_usec(u->hwbuf_size, &ss));
1483
1484 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1485 nfrags, (long unsigned) u->fragment_size,
1486 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1487
1488 if (use_tsched)
1489 pa_log_info("Time scheduling watermark is %0.2fms",
1490 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1491
1492 if (update_sw_params(u) < 0)
1493 goto fail;
1494
1495 pa_memchunk_reset(&u->memchunk);
1496
1497 if (u->mixer_handle) {
1498 pa_assert(u->mixer_elem);
1499
1500 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1501 pa_bool_t suitable = FALSE;
1502
1503 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
1504 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1505 else if (u->hw_volume_min >= u->hw_volume_max)
1506 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
1507 else {
1508 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1509 suitable = TRUE;
1510 }
1511
1512 if (suitable) {
1513 if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1514 pa_log_info("Mixer doesn't support dB information or data is ignored.");
1515 else {
1516 #ifdef HAVE_VALGRIND_MEMCHECK_H
1517 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1518 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1519 #endif
1520
1521 if (u->hw_dB_min >= u->hw_dB_max)
1522 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1523 else {
1524 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1525 u->hw_dB_supported = TRUE;
1526
1527 if (u->hw_dB_max > 0) {
1528 u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
1529 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1530 } else
1531 pa_log_info("No particular base volume set, fixing to 0 dB");
1532 }
1533 }
1534
1535 if (!u->hw_dB_supported &&
1536 u->hw_volume_max - u->hw_volume_min < 3) {
1537
1538 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1539 suitable = FALSE;
1540 }
1541 }
1542
1543 if (suitable) {
1544 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1545
1546 u->sink->get_volume = sink_get_volume_cb;
1547 u->sink->set_volume = sink_set_volume_cb;
1548 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1549 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1550
1551 if (!u->hw_dB_supported)
1552 u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
1553 } else
1554 pa_log_info("Using software volume control.");
1555 }
1556
1557 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1558 u->sink->get_mute = sink_get_mute_cb;
1559 u->sink->set_mute = sink_set_mute_cb;
1560 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1561 } else
1562 pa_log_info("Using software mute control.");
1563
1564 u->mixer_fdl = pa_alsa_fdlist_new();
1565
1566 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1567 pa_log("Failed to initialize file descriptor monitoring");
1568 goto fail;
1569 }
1570
1571 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1572 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1573 } else
1574 u->mixer_fdl = NULL;
1575
1576 pa_alsa_dump(u->pcm_handle);
1577
1578 if (!(u->thread = pa_thread_new(thread_func, u))) {
1579 pa_log("Failed to create thread.");
1580 goto fail;
1581 }
1582
1583 /* Get initial mixer settings */
1584 if (data.volume_is_set) {
1585 if (u->sink->set_volume)
1586 u->sink->set_volume(u->sink);
1587 } else {
1588 if (u->sink->get_volume)
1589 u->sink->get_volume(u->sink);
1590 }
1591
1592 if (data.muted_is_set) {
1593 if (u->sink->set_mute)
1594 u->sink->set_mute(u->sink);
1595 } else {
1596 if (u->sink->get_mute)
1597 u->sink->get_mute(u->sink);
1598 }
1599
1600 pa_sink_put(u->sink);
1601
1602 return u->sink;
1603
1604 fail:
1605
1606 userdata_free(u);
1607
1608 return NULL;
1609 }
1610
/* Tear down a userdata in dependency order: detach the sink from the
 * core, shut down the I/O thread, then release the remaining resources.
 * Each field is NULL-checked individually, so this also works on a
 * partially-initialized userdata from pa_alsa_sink_new()'s failure
 * path. u itself must be non-NULL. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Unlink first so nothing new gets routed to this sink while we
     * dismantle it */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Ask the I/O thread to exit and wait for it before freeing
     * anything the thread might still touch */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->pcm_handle) {
        /* Drop queued samples so close doesn't block draining them */
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1653
1654 void pa_alsa_sink_free(pa_sink *s) {
1655 struct userdata *u;
1656
1657 pa_sink_assert_ref(s);
1658 pa_assert_se(u = s->userdata);
1659
1660 userdata_free(u);
1661 }