]> code.delx.au - pulseaudio/blob - src/modules/alsa/alsa-sink.c
5020eac14c2a8871d8e493d75009ff1a6aa98e92
[pulseaudio] / src / modules / alsa / alsa-sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
54
55 #include "alsa-util.h"
56 #include "alsa-sink.h"
57
58 #define DEFAULT_DEVICE "default"
59 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
60 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
61 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
62 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
63
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    /* IO thread and its communication/polling machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;              /* ALSA PCM device; NULL while suspended */

    /* Mixer state for hardware volume/mute control */
    pa_alsa_fdlist *mixer_fdl;
    snd_mixer_t *mixer_handle;
    snd_mixer_elem_t *mixer_elem;
    long hw_volume_max, hw_volume_min;  /* raw mixer volume range */
    long hw_dB_max, hw_dB_min;          /* mixer range in 1/100 dB (when supported) */
    pa_bool_t hw_dB_supported;          /* TRUE if the mixer element supports dB access */
    pa_bool_t mixer_seperate_channels;  /* (sic, keep spelling) per-channel volume control */
    pa_cvolume hardware_volume;         /* last volume read from / written to hardware */

    /* Sizes are in bytes unless noted otherwise */
    size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
    unsigned nfragments;
    pa_memchunk memchunk;               /* partially written render chunk (non-mmap path) */

    char *device_name;

    pa_bool_t use_mmap, use_tsched;     /* mmap transfer / timer-based scheduling enabled */

    /* Suppress underrun handling right after stream start resp. a rewind */
    pa_bool_t first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    /* PA channel index -> ALSA mixer channel mapping */
    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    pa_smoother *smoother;              /* time smoother for latency interpolation */
    int64_t frame_index;                /* frames handed to ALSA since stream start */
    uint64_t since_start;               /* bytes written since the last (re)start */

    /* Frames at the end of the hw buffer we deliberately leave unused to
     * honor a requested latency smaller than the full buffer */
    snd_pcm_sframes_t hwbuf_unused_frames;
};
104
105 static void userdata_free(struct userdata *u);
106
/* Clamp the timer-scheduling wakeup watermark into its usable range: it
 * must leave at least TSCHED_MIN_SLEEP_USEC of sleeping room below the
 * usable buffer size and cover at least TSCHED_MIN_WAKEUP_USEC, with both
 * bounds themselves limited to half the usable buffer and at least one
 * frame. */
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    size_t min_sleep, min_wakeup;
    pa_assert(u);

    /* Usable part of the hw buffer, excluding the deliberately unused tail */
    max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;

    min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);

    if (min_sleep > max_use/2)
        min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_sleep < u->frame_size)
        min_sleep = u->frame_size;

    if (min_wakeup > max_use/2)
        min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
    if (min_wakeup < u->frame_size)
        min_wakeup = u->frame_size;

    /* Upper bound first, then lower bound: if they conflict, the minimum
     * wakeup requirement wins */
    if (u->tsched_watermark > max_use-min_sleep)
        u->tsched_watermark = max_use-min_sleep;

    if (u->tsched_watermark < min_wakeup)
        u->tsched_watermark = min_wakeup;
}
133
/* Compute how long the IO thread may sleep before it must refill the hw
 * buffer (*sleep_usec) and how much playback time must remain queued as a
 * safety margin (*process_usec). Based on the requested sink latency (or
 * the full hw buffer time if none was requested) and the tsched
 * watermark. */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    /* No specific latency requested -- use the whole buffer time */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);

    if (usec >= wm) {
        *sleep_usec = usec - wm;
        *process_usec = wm;
    } else
        /* Watermark exceeds the latency: split the time evenly */
        *process_usec = *sleep_usec = usec / 2;

    /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
}
159
160 static int try_recover(struct userdata *u, const char *call, int err) {
161 pa_assert(u);
162 pa_assert(call);
163 pa_assert(err < 0);
164
165 pa_log_debug("%s: %s", call, snd_strerror(err));
166
167 pa_assert(err != -EAGAIN);
168
169 if (err == -EPIPE)
170 pa_log_debug("%s: Buffer underrun!", call);
171
172 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
173 u->first = TRUE;
174 u->since_start = 0;
175 return 0;
176 }
177
178 pa_log("%s: %s", call, snd_strerror(err));
179 return -1;
180 }
181
182 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
183 size_t left_to_play;
184
185 if ((size_t) n*u->frame_size < u->hwbuf_size)
186 left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
187 else
188 left_to_play = 0;
189
190 if (left_to_play > 0) {
191 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
192 } else if (!u->first && !u->after_rewind) {
193 pa_log_info("Underrun!");
194
195 if (u->use_tsched) {
196 size_t old_watermark = u->tsched_watermark;
197
198 u->tsched_watermark *= 2;
199 fix_tsched_watermark(u);
200
201 if (old_watermark != u->tsched_watermark)
202 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
203 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
204 }
205 }
206
207 return left_to_play;
208 }
209
/* Fill the hw buffer via mmap'ed transfer: sink data is rendered directly
 * into the DMA area with pa_sink_render_into_full(). 'polled' is TRUE when
 * we were woken by POLLOUT rather than by our timer. Returns > 0 if any
 * data was written, 0 if not, < 0 on unrecoverable ALSA errors; on
 * success *sleep_usec is set to how long we may sleep (sound-card time
 * domain). */
static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* NOTE: the following if statement is the (braceless) body of
             * this one -- the early break only applies in tsched mode.
             *
             * We won't fill up the playback buffer before at least half
             * the sleep time is over because otherwise we might ask for
             * more data from the clients than they expect. We need to
             * guarantee that clients only have to keep around a single hw
             * buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {

            /* POLLOUT fired but there is nothing writable -- driver bug */
            if (polled && pa_log_ratelimit())
                pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                       "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                       "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");

            break;
        }

        /* Never touch the frames we deliberately keep unused */
        n -= u->hwbuf_unused_frames;

        polled = FALSE;

        /* pa_log_debug("Filling up"); */

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
            snd_pcm_sframes_t sframes;

            /* pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the DMA area in a fixed memblock so the sink can
             * render straight into the device buffer */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);

            /* FIXME: Maybe we can do something to keep this memory block
             * a little bit longer around? */
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = 1;

            u->frame_index += (int64_t) frames;
            u->since_start += frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= (snd_pcm_uframes_t) n)
                break;

            n -= (snd_pcm_sframes_t) frames;
        }
    }

    /* NOTE(review): pa_usec_t is unsigned -- if left_to_play converts to
     * fewer usecs than process_usec this wraps; looks like callers only
     * consume *sleep_usec in tsched mode -- confirm */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
335
/* Fill the hw buffer via snd_pcm_writei() (non-mmap path). A partially
 * consumed render chunk is carried across calls in u->memchunk. Semantics
 * of 'polled', *sleep_usec and the return value match mmap_write(). */
static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
    int work_done = 0;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        int r;

        snd_pcm_hwsync(u->pcm_handle);

        if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
                continue;

            return r;
        }

        left_to_play = check_left_to_play(u, n);

        if (u->use_tsched)

            /* NOTE: the following if statement is the (braceless) body of
             * this one -- the early break only applies in tsched mode.
             *
             * We won't fill up the playback buffer before at least half
             * the sleep time is over because otherwise we might ask for
             * more data from the clients than they expect. We need to
             * guarantee that clients only have to keep around a single hw
             * buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {

            /* POLLOUT fired but there is nothing writable -- driver bug */
            if (polled && pa_log_ratelimit())
                pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
                       "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
                       "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");

            break;
        }

        /* Never touch the frames we deliberately keep unused */
        n -= u->hwbuf_unused_frames;

        polled = FALSE;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;

            /* Render a fresh chunk only once the previous one is used up */
            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > n)
                frames = n;

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(frames != 0);

            if (PA_UNLIKELY(frames < 0)) {

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                return r;
            }

            /* Advance the chunk past what was actually written */
            u->memchunk.index += (size_t) frames * u->frame_size;
            u->memchunk.length -= (size_t) frames * u->frame_size;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = 1;

            u->frame_index += frames;
            u->since_start += (size_t) frames * u->frame_size;

            /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (frames >= n)
                break;

            n -= frames;
        }
    }

    /* NOTE(review): pa_usec_t is unsigned -- if left_to_play converts to
     * fewer usecs than process_usec this wraps; looks like callers only
     * consume *sleep_usec in tsched mode -- confirm */
    *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
    return work_done;
}
444
/* Feed the time smoother a fresh (system time, playback time) sample pair.
 * Playback time is derived from the frames actually played so far: frames
 * written (u->frame_index) minus the current ALSA delay. Silently returns
 * if the delay cannot be queried. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t frames;
    int err;
    pa_usec_t now1, now2;
    /* struct timeval timestamp; */
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    snd_pcm_hwsync(u->pcm_handle);
    snd_pcm_avail_update(u->pcm_handle);

    /* Alternative approach via snd_pcm_status(), kept for reference: */
    /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
    /*     pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
    /*     return; */
    /* } */

    /* delay = snd_pcm_status_get_delay(status); */

    if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
        pa_log("Failed to query DSP status data: %s", snd_strerror(err));
        return;
    }

    /* Frames that have actually left the speaker by now */
    frames = u->frame_index - delay;

    /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */

    /* snd_pcm_status_get_tstamp(status, &timestamp); */
    /* pa_rtclock_from_wallclock(&timestamp); */
    /* now1 = pa_timeval_load(&timestamp); */

    now1 = pa_rtclock_usec();
    now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
    pa_smoother_put(u->smoother, now1, now2);
}
487
488 static pa_usec_t sink_get_latency(struct userdata *u) {
489 pa_usec_t r = 0;
490 int64_t delay;
491 pa_usec_t now1, now2;
492
493 pa_assert(u);
494
495 now1 = pa_rtclock_usec();
496 now2 = pa_smoother_get(u->smoother, now1);
497
498 delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
499
500 if (delay > 0)
501 r = (pa_usec_t) delay;
502
503 if (u->memchunk.memblock)
504 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
505
506 return r;
507 }
508
509 static int build_pollfd(struct userdata *u) {
510 pa_assert(u);
511 pa_assert(u->pcm_handle);
512
513 if (u->alsa_rtpoll_item)
514 pa_rtpoll_item_free(u->alsa_rtpoll_item);
515
516 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
517 return -1;
518
519 return 0;
520 }
521
/* Suspend the sink: pause the smoother, close the PCM device and drop the
 * poll item. Called when the sink enters PA_SINK_SUSPENDED. Always
 * returns 0. */
static int suspend(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    pa_smoother_pause(u->smoother, pa_rtclock_usec());

    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }

    pa_log_info("Device suspended...");

    return 0;
}
542
/* Recompute and apply the ALSA software parameters from the currently
 * requested latency: how much of the hw buffer to leave unused, the
 * avail_min wakeup threshold, and the sink's max request size. Returns 0
 * on success or a negative ALSA error code. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused_frames = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused */
            u->hwbuf_unused_frames = (snd_pcm_sframes_t)
                (PA_LIKELY(b < u->hwbuf_size) ?
                 ((u->hwbuf_size - b) / u->frame_size) : 0);
        }

        /* The watermark must fit into the shrunken usable buffer */
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* With timer scheduling, ALSA should only wake us once our own
         * sleep time would have elapsed anyway */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
        pa_log("Failed to set software parameters: %s", snd_strerror(err));
        return err;
    }

    pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);

    return 0;
}
598
/* Resume after a suspend: reopen the PCM device and restore the original
 * hardware configuration. Fails -- leaving the device closed -- if the
 * original access mode, sample spec or fragment settings cannot be
 * reproduced. Returns 0 on success, -1 on failure. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    unsigned nfrags;
    snd_pcm_uframes_t period_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Drop cached ALSA configuration in case it changed while suspended */
    snd_config_update_free_global();
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
                            /*SND_PCM_NONBLOCK|*/
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
        goto fail;
    }

    /* pa_alsa_set_hw_params() may adjust these in place; start from the
     * values we had before the suspend and verify them afterwards */
    ss = u->sink->sample_spec;
    nfrags = u->nfragments;
    period_size = u->fragment_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
                    (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
                    (unsigned long) nfrags, period_size * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Treat the next write like a fresh stream start */
    u->first = TRUE;
    u->since_start = 0;

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -1;
}
672
/* IO-thread message handler: answers latency queries and performs the
 * suspend/resume transitions on state changes, then delegates everything
 * to the generic pa_sink_process_msg(). */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* While suspended there is no device, hence zero latency */
            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    if (suspend(u) < 0)
                        return -1;

                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    /* First transition out of INIT: set up polling */
                    if (u->sink->thread_info.state == PA_SINK_INIT) {
                        if (build_pollfd(u) < 0)
                            return -1;
                    }

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        if (unsuspend(u) < 0)
                            return -1;
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states */
                    ;
            }

            break;
    }

    /* Let the generic sink implementation handle everything else */
    return pa_sink_process_msg(o, code, data, offset, chunk);
}
727
728 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
729 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
730
731 pa_assert(u);
732 pa_assert(u->mixer_handle);
733
734 if (mask == SND_CTL_EVENT_MASK_REMOVE)
735 return 0;
736
737 if (mask & SND_CTL_EVENT_MASK_VALUE) {
738 pa_sink_get_volume(u->sink, TRUE);
739 pa_sink_get_mute(u->sink, TRUE);
740 }
741
742 return 0;
743 }
744
745 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
746
747 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
748 (double) (u->hw_volume_max - u->hw_volume_min));
749 }
750
751 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
752 long alsa_vol;
753
754 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
755 / PA_VOLUME_NORM) + u->hw_volume_min;
756
757 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
758 }
759
/* Read the current hardware volume from the mixer into s->volume, using
 * per-channel or mono access and dB or raw values depending on mixer
 * capabilities. If the hw volume changed behind our back and dB is
 * supported, the software volume is reset to neutral. Returns 0 on
 * success, -1 on mixer errors. */
static int sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;
    char t[PA_CVOLUME_SNPRINT_MAX];

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;

            if (u->hw_dB_supported) {

                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                /* ALSA dB values are in 1/100 dB, relative to hw_dB_max */
                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
            } else {

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        long alsa_vol;

        if (u->hw_dB_supported) {

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            /* One hw channel -- replicate the value across all PA channels */
            pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    if (!pa_cvolume_equal(&u->hardware_volume, &r)) {

        u->hardware_volume = s->volume = r;

        if (u->hw_dB_supported) {
            pa_cvolume reset;

            /* Hmm, so the hardware volume changed, let's reset our software volume */

            pa_cvolume_reset(&reset, s->sample_spec.channels);
            pa_sink_set_soft_volume(s, &reset);
        }
    }

    return 0;

fail:
    pa_log_error("Unable to read volume: %s", snd_strerror(err));

    return -1;
}
842
/* Write s->volume to the hardware mixer as closely as possible. With dB
 * support the difference between requested and achieved hardware volume
 * is compensated in software; without it, s->volume is overwritten with
 * what the hardware actually provides. Returns 0 on success, -1 on mixer
 * errors. */
static int sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    int err;
    unsigned i;
    pa_cvolume r;

    pa_assert(u);
    pa_assert(u->mixer_elem);

    if (u->mixer_seperate_channels) {

        r.channels = s->sample_spec.channels;

        for (i = 0; i < s->sample_spec.channels; i++) {
            long alsa_vol;
            pa_volume_t vol;

            vol = s->volume.values[i];

            if (u->hw_dB_supported) {

                /* Convert to 1/100 dB relative to hw_dB_max and clamp */
                alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
                alsa_vol += u->hw_dB_max;
                alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

                if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
                    goto fail;

                /* Read back what the hardware actually accepted */
                if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
                VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

                r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);

            } else {
                alsa_vol = to_alsa_volume(u, vol);

                if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
                    goto fail;

                if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
                    goto fail;

                r.values[i] = from_alsa_volume(u, alsa_vol);
            }
        }

    } else {
        pa_volume_t vol;
        long alsa_vol;

        /* Only one hw channel: drive it with the loudest PA channel */
        vol = pa_cvolume_max(&s->volume);

        if (u->hw_dB_supported) {
            alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
            alsa_vol += u->hw_dB_max;
            alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);

            if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

#ifdef HAVE_VALGRIND_MEMCHECK_H
            VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
#endif

            pa_cvolume_set(&r, s->volume.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));

        } else {
            alsa_vol = to_alsa_volume(u, vol);

            if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
                goto fail;

            if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
                goto fail;

            pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
        }
    }

    u->hardware_volume = r;

    if (u->hw_dB_supported) {
        char t[PA_CVOLUME_SNPRINT_MAX];

        /* Match exactly what the user requested by software */

        pa_sw_cvolume_divide(&r, &s->volume, &r);
        pa_sink_set_soft_volume(s, &r);

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));

    } else

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it.
         *
         * NOTE: the assignment below is the (braceless) body of this
         * else branch. */

        s->volume = r;

    return 0;

fail:
    pa_log_error("Unable to set volume: %s", snd_strerror(err));

    return -1;
}
957
958 static int sink_get_mute_cb(pa_sink *s) {
959 struct userdata *u = s->userdata;
960 int err, sw;
961
962 pa_assert(u);
963 pa_assert(u->mixer_elem);
964
965 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
966 pa_log_error("Unable to get switch: %s", snd_strerror(err));
967 return -1;
968 }
969
970 s->muted = !sw;
971
972 return 0;
973 }
974
975 static int sink_set_mute_cb(pa_sink *s) {
976 struct userdata *u = s->userdata;
977 int err;
978
979 pa_assert(u);
980 pa_assert(u->mixer_elem);
981
982 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
983 pa_log_error("Unable to set switch: %s", snd_strerror(err));
984 return -1;
985 }
986
987 return 0;
988 }
989
990 static void sink_update_requested_latency_cb(pa_sink *s) {
991 struct userdata *u = s->userdata;
992 snd_pcm_sframes_t before;
993 pa_assert(u);
994
995 if (!u->pcm_handle)
996 return;
997
998 before = u->hwbuf_unused_frames;
999 update_sw_params(u);
1000
1001 /* Let's check whether we now use only a smaller part of the
1002 buffer then before. If so, we need to make sure that subsequent
1003 rewinds are relative to the new maxium fill level and not to the
1004 current fill level. Thus, let's do a full rewind once, to clear
1005 things up. */
1006
1007 if (u->hwbuf_unused_frames > before) {
1008 pa_log_debug("Requesting rewind due to latency change.");
1009 pa_sink_request_rewind(s, (size_t) -1);
1010 }
1011 }
1012
1013 static int process_rewind(struct userdata *u) {
1014 snd_pcm_sframes_t unused;
1015 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1016 pa_assert(u);
1017
1018 /* Figure out how much we shall rewind and reset the counter */
1019 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1020 u->sink->thread_info.rewind_nbytes = 0;
1021
1022 if (rewind_nbytes <= 0)
1023 goto finish;
1024
1025 pa_assert(rewind_nbytes > 0);
1026 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1027
1028 snd_pcm_hwsync(u->pcm_handle);
1029 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
1030 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
1031 return -1;
1032 }
1033
1034 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1035
1036 if (u->hwbuf_size > unused_nbytes)
1037 limit_nbytes = u->hwbuf_size - unused_nbytes;
1038 else
1039 limit_nbytes = 0;
1040
1041 if (rewind_nbytes > limit_nbytes)
1042 rewind_nbytes = limit_nbytes;
1043
1044 if (rewind_nbytes > 0) {
1045 snd_pcm_sframes_t in_frames, out_frames;
1046
1047 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1048
1049 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1050 pa_log_debug("before: %lu", (unsigned long) in_frames);
1051 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1052 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
1053 return -1;
1054 }
1055 pa_log_debug("after: %lu", (unsigned long) out_frames);
1056
1057 rewind_nbytes = (size_t) out_frames * u->frame_size;
1058
1059 if (rewind_nbytes <= 0)
1060 pa_log_info("Tried rewind, but was apparently not possible.");
1061 else {
1062 u->frame_index -= out_frames;
1063 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1064 pa_sink_process_rewind(u->sink, rewind_nbytes);
1065
1066 u->after_rewind = TRUE;
1067 return 0;
1068 }
1069 } else
1070 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1071
1072 finish:
1073
1074 pa_sink_process_rewind(u->sink, 0);
1075
1076 return 0;
1077
1078 }
1079
/* The sink's real-time I/O thread: renders audio from the sink inputs and
 * writes it to the ALSA device, using either timer-based scheduling
 * (tsched) or classic poll()-driven sound-IRQ scheduling. Runs until a
 * PA_MESSAGE_SHUTDOWN is received or a fatal error occurs. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    /* Elevate to real-time priority if the daemon is configured for it */
    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    /* Make this thread's message queue and poll loop the current ones */
    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    for (;;) {
        int ret;

        /* pa_log_debug("loop"); */

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;

            /* Honor any pending rewind request before writing new data */
            if (u->sink->thread_info.rewind_requested)
                if (process_rewind(u) < 0)
                    goto fail;

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                /* First write after (re)start: kick the PCM into the
                 * running state and resume the clock smoother */
                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_usec());
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely.*/

                    /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
            }

            u->first = FALSE;
            u->after_rewind = FALSE;

        } else if (u->use_tsched)

            /* OK, we're in an invalid state, let's disable our timers */
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* ret == 0 means the rtpoll was asked to quit (shutdown) */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
                goto fail;
            }

            /* Error-like events: try to recover the PCM (e.g. from an
             * xrun) and restart playback from scratch */
            if (revents & (POLLIN|POLLERR|POLLNVAL|POLLHUP|POLLPRI)) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                u->since_start = 0;
            }

            /* In tsched mode a device wakeup is unexpected (the timer
             * should drive us), so log it — rate-limited */
            if (revents && u->use_tsched && pa_log_ratelimit())
                pa_log_debug("Wakeup from ALSA!%s%s", (revents & POLLIN) ? " INPUT" : "", (revents & POLLOUT) ? " OUTPUT" : "");
        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1212
1213 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1214 const char *n;
1215 char *t;
1216
1217 pa_assert(data);
1218 pa_assert(ma);
1219 pa_assert(device_name);
1220
1221 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1222 pa_sink_new_data_set_name(data, n);
1223 data->namereg_fail = TRUE;
1224 return;
1225 }
1226
1227 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1228 data->namereg_fail = TRUE;
1229 else {
1230 n = device_id ? device_id : device_name;
1231 data->namereg_fail = FALSE;
1232 }
1233
1234 t = pa_sprintf_malloc("alsa_output.%s", n);
1235 pa_sink_new_data_set_name(data, t);
1236 pa_xfree(t);
1237 }
1238
1239 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {
1240
1241 struct userdata *u = NULL;
1242 const char *dev_id = NULL;
1243 pa_sample_spec ss;
1244 pa_channel_map map;
1245 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1246 snd_pcm_uframes_t period_frames, tsched_frames;
1247 size_t frame_size;
1248 snd_pcm_info_t *pcm_info = NULL;
1249 int err;
1250 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d;
1251 pa_usec_t usec;
1252 pa_sink_new_data data;
1253
1254 snd_pcm_info_alloca(&pcm_info);
1255
1256 pa_assert(m);
1257 pa_assert(ma);
1258
1259 ss = m->core->default_sample_spec;
1260 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1261 pa_log("Failed to parse sample specification and channel map");
1262 goto fail;
1263 }
1264
1265 frame_size = pa_frame_size(&ss);
1266
1267 nfrags = m->core->default_n_fragments;
1268 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1269 if (frag_size <= 0)
1270 frag_size = (uint32_t) frame_size;
1271 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1272 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1273
1274 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1275 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1276 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1277 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1278 pa_log("Failed to parse buffer metrics");
1279 goto fail;
1280 }
1281
1282 hwbuf_size = frag_size * nfrags;
1283 period_frames = frag_size/frame_size;
1284 tsched_frames = tsched_size/frame_size;
1285
1286 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1287 pa_log("Failed to parse mmap argument.");
1288 goto fail;
1289 }
1290
1291 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1292 pa_log("Failed to parse tsched argument.");
1293 goto fail;
1294 }
1295
1296 if (use_tsched && !pa_rtclock_hrtimer()) {
1297 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1298 use_tsched = FALSE;
1299 }
1300
1301 u = pa_xnew0(struct userdata, 1);
1302 u->core = m->core;
1303 u->module = m;
1304 u->use_mmap = use_mmap;
1305 u->use_tsched = use_tsched;
1306 u->first = TRUE;
1307 u->since_start = 0;
1308 u->after_rewind = FALSE;
1309 u->rtpoll = pa_rtpoll_new();
1310 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1311 u->alsa_rtpoll_item = NULL;
1312
1313 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1314 usec = pa_rtclock_usec();
1315 pa_smoother_set_time_offset(u->smoother, usec);
1316 pa_smoother_pause(u->smoother, usec);
1317
1318 b = use_mmap;
1319 d = use_tsched;
1320
1321 if (profile) {
1322
1323 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1324 pa_log("device_id= not set");
1325 goto fail;
1326 }
1327
1328 if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
1329 dev_id,
1330 &u->device_name,
1331 &ss, &map,
1332 SND_PCM_STREAM_PLAYBACK,
1333 &nfrags, &period_frames, tsched_frames,
1334 &b, &d, profile)))
1335
1336 goto fail;
1337
1338 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1339
1340 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1341 dev_id,
1342 &u->device_name,
1343 &ss, &map,
1344 SND_PCM_STREAM_PLAYBACK,
1345 &nfrags, &period_frames, tsched_frames,
1346 &b, &d, &profile)))
1347
1348 goto fail;
1349
1350 } else {
1351
1352 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1353 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1354 &u->device_name,
1355 &ss, &map,
1356 SND_PCM_STREAM_PLAYBACK,
1357 &nfrags, &period_frames, tsched_frames,
1358 &b, &d, FALSE)))
1359 goto fail;
1360
1361 }
1362
1363 pa_assert(u->device_name);
1364 pa_log_info("Successfully opened device %s.", u->device_name);
1365
1366 if (profile)
1367 pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);
1368
1369 if (use_mmap && !b) {
1370 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1371 u->use_mmap = use_mmap = FALSE;
1372 }
1373
1374 if (use_tsched && (!b || !d)) {
1375 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1376 u->use_tsched = use_tsched = FALSE;
1377 }
1378
1379 if (u->use_mmap)
1380 pa_log_info("Successfully enabled mmap() mode.");
1381
1382 if (u->use_tsched)
1383 pa_log_info("Successfully enabled timer-based scheduling mode.");
1384
1385 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1386 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1387 goto fail;
1388 }
1389
1390 /* ALSA might tweak the sample spec, so recalculate the frame size */
1391 frame_size = pa_frame_size(&ss);
1392
1393 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1394 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1395 else {
1396 pa_bool_t found = FALSE;
1397
1398 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1399 found = TRUE;
1400 else {
1401 snd_pcm_info_t *info;
1402
1403 snd_pcm_info_alloca(&info);
1404
1405 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1406 char *md;
1407 int card_idx;
1408
1409 if ((card_idx = snd_pcm_info_get_card(info)) >= 0) {
1410
1411 md = pa_sprintf_malloc("hw:%i", card_idx);
1412
1413 if (strcmp(u->device_name, md))
1414 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1415 found = TRUE;
1416 pa_xfree(md);
1417 }
1418 }
1419 }
1420
1421 if (found)
1422 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM", TRUE)))
1423 found = FALSE;
1424
1425 if (!found) {
1426 snd_mixer_close(u->mixer_handle);
1427 u->mixer_handle = NULL;
1428 }
1429 }
1430
1431 pa_sink_new_data_init(&data);
1432 data.driver = driver;
1433 data.module = m;
1434 data.card = card;
1435 set_sink_name(&data, ma, dev_id, u->device_name);
1436 pa_sink_new_data_set_sample_spec(&data, &ss);
1437 pa_sink_new_data_set_channel_map(&data, &map);
1438
1439 pa_alsa_init_proplist_pcm(m->core, data.proplist, pcm_info);
1440 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1441 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1442 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1443 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1444
1445 if (profile) {
1446 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
1447 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
1448 }
1449
1450 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1451 pa_sink_new_data_done(&data);
1452
1453 if (!u->sink) {
1454 pa_log("Failed to create sink object");
1455 goto fail;
1456 }
1457
1458 u->sink->parent.process_msg = sink_process_msg;
1459 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1460 u->sink->userdata = u;
1461
1462 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1463 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1464
1465 u->frame_size = frame_size;
1466 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1467 u->nfragments = nfrags;
1468 u->hwbuf_size = u->fragment_size * nfrags;
1469 u->hwbuf_unused_frames = 0;
1470 u->tsched_watermark = tsched_watermark;
1471 u->frame_index = 0;
1472 u->hw_dB_supported = FALSE;
1473 u->hw_dB_min = u->hw_dB_max = 0;
1474 u->hw_volume_min = u->hw_volume_max = 0;
1475 u->mixer_seperate_channels = FALSE;
1476 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1477
1478 if (use_tsched)
1479 fix_tsched_watermark(u);
1480
1481 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1482 u->sink->thread_info.max_request = u->hwbuf_size;
1483
1484 pa_sink_set_latency_range(u->sink,
1485 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1486 pa_bytes_to_usec(u->hwbuf_size, &ss));
1487
1488 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1489 nfrags, (long unsigned) u->fragment_size,
1490 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1491
1492 if (use_tsched)
1493 pa_log_info("Time scheduling watermark is %0.2fms",
1494 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1495
1496 if (update_sw_params(u) < 0)
1497 goto fail;
1498
1499 pa_memchunk_reset(&u->memchunk);
1500
1501 if (u->mixer_handle) {
1502 pa_assert(u->mixer_elem);
1503
1504 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1505 pa_bool_t suitable = FALSE;
1506
1507 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
1508 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1509 else if (u->hw_volume_min >= u->hw_volume_max)
1510 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
1511 else {
1512 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1513 suitable = TRUE;
1514 }
1515
1516 if (suitable) {
1517 if (snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1518 pa_log_info("Mixer doesn't support dB information.");
1519 else {
1520 #ifdef HAVE_VALGRIND_MEMCHECK_H
1521 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1522 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1523 #endif
1524
1525 if (u->hw_dB_min >= u->hw_dB_max)
1526 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1527 else {
1528 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1529 u->hw_dB_supported = TRUE;
1530
1531 if (u->hw_dB_max > 0) {
1532 u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
1533 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1534 } else
1535 pa_log_info("No particular base volume set, fixing to 0 dB");
1536 }
1537 }
1538
1539 if (!u->hw_dB_supported &&
1540 u->hw_volume_max - u->hw_volume_min < 3) {
1541
1542 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1543 suitable = FALSE;
1544 }
1545 }
1546
1547 if (suitable) {
1548 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1549
1550 u->sink->get_volume = sink_get_volume_cb;
1551 u->sink->set_volume = sink_set_volume_cb;
1552 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1553 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1554
1555 } else
1556 pa_log_info("Using software volume control.");
1557 }
1558
1559 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1560 u->sink->get_mute = sink_get_mute_cb;
1561 u->sink->set_mute = sink_set_mute_cb;
1562 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1563 } else
1564 pa_log_info("Using software mute control.");
1565
1566 u->mixer_fdl = pa_alsa_fdlist_new();
1567
1568 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1569 pa_log("Failed to initialize file descriptor monitoring");
1570 goto fail;
1571 }
1572
1573 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1574 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1575 } else
1576 u->mixer_fdl = NULL;
1577
1578 pa_alsa_dump(u->pcm_handle);
1579
1580 if (!(u->thread = pa_thread_new(thread_func, u))) {
1581 pa_log("Failed to create thread.");
1582 goto fail;
1583 }
1584
1585 /* Get initial mixer settings */
1586 if (data.volume_is_set) {
1587 if (u->sink->set_volume)
1588 u->sink->set_volume(u->sink);
1589 } else {
1590 if (u->sink->get_volume)
1591 u->sink->get_volume(u->sink);
1592 }
1593
1594 if (data.muted_is_set) {
1595 if (u->sink->set_mute)
1596 u->sink->set_mute(u->sink);
1597 } else {
1598 if (u->sink->get_mute)
1599 u->sink->get_mute(u->sink);
1600 }
1601
1602 pa_sink_put(u->sink);
1603
1604 return u->sink;
1605
1606 fail:
1607
1608 userdata_free(u);
1609
1610 return NULL;
1611 }
1612
/* Tears down everything pa_alsa_sink_new() set up, in reverse dependency
 * order: the sink is unlinked first, then the I/O thread is stopped,
 * and only afterwards are the resources the thread was using released.
 * The order of the statements below is load-bearing — do not reorder. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Detach the sink from the core so no new data is routed to it */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Stop the I/O thread synchronously before freeing anything it uses */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    /* Drop any queued samples before closing the PCM device */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
1655
1656 void pa_alsa_sink_free(pa_sink *s) {
1657 struct userdata *u;
1658
1659 pa_sink_assert_ref(s);
1660 pa_assert_se(u = s->userdata);
1661
1662 userdata_free(u);
1663 }