1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #ifdef HAVE_VALGRIND_MEMCHECK_H
32 #include <valgrind/memcheck.h>
33 #endif
34
35 #include <pulse/xmalloc.h>
36 #include <pulse/util.h>
37 #include <pulse/timeval.h>
38
39 #include <pulsecore/core.h>
40 #include <pulsecore/module.h>
41 #include <pulsecore/memchunk.h>
42 #include <pulsecore/sink.h>
43 #include <pulsecore/modargs.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/rtclock.h>
53 #include <pulsecore/time-smoother.h>
54
55 #include "alsa-util.h"
56 #include "alsa-sink.h"
57
58 #define DEFAULT_DEVICE "default"
59 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC) /* 2s */
60 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC) /* 20ms */
61 #define TSCHED_MIN_SLEEP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
62 #define TSCHED_MIN_WAKEUP_USEC (3*PA_USEC_PER_MSEC) /* 3ms */
63
64 struct userdata {
65 pa_core *core;
66 pa_module *module;
67 pa_sink *sink;
68
69 pa_thread *thread;
70 pa_thread_mq thread_mq;
71 pa_rtpoll *rtpoll;
72
73 snd_pcm_t *pcm_handle;
74
75 pa_alsa_fdlist *mixer_fdl;
76 snd_mixer_t *mixer_handle;
77 snd_mixer_elem_t *mixer_elem;
78 long hw_volume_max, hw_volume_min;
79 long hw_dB_max, hw_dB_min;
80 pa_bool_t hw_dB_supported;
81 pa_bool_t mixer_seperate_channels;
82 pa_cvolume hardware_volume;
83
84 size_t frame_size, fragment_size, hwbuf_size, tsched_watermark;
85 unsigned nfragments;
86 pa_memchunk memchunk;
87
88 char *device_name;
89
90 pa_bool_t use_mmap, use_tsched;
91
92 pa_bool_t first, after_rewind;
93
94 pa_rtpoll_item *alsa_rtpoll_item;
95
96 snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
97
98 pa_smoother *smoother;
99 int64_t frame_index;
100 uint64_t since_start;
101
102 snd_pcm_sframes_t hwbuf_unused_frames;
103 };
104
105 static void userdata_free(struct userdata *u);
106
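/* Clamp the timer-scheduling watermark: it may only cover the part of the
 * hardware buffer that is actually in use, must leave room for the minimum
 * sleep time and must never drop below the minimum wakeup granularity. */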
107 static void fix_tsched_watermark(struct userdata *u) {
108 size_t max_use;
109 size_t min_sleep, min_wakeup;
110 pa_assert(u);
111
112 max_use = u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size;
113
114 min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
115 min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
116
117 if (min_sleep > max_use/2)
118 min_sleep = pa_frame_align(max_use/2, &u->sink->sample_spec);
119 if (min_sleep < u->frame_size)
120 min_sleep = u->frame_size;
121
122 if (min_wakeup > max_use/2)
123 min_wakeup = pa_frame_align(max_use/2, &u->sink->sample_spec);
124 if (min_wakeup < u->frame_size)
125 min_wakeup = u->frame_size;
126
127 if (u->tsched_watermark > max_use-min_sleep)
128 u->tsched_watermark = max_use-min_sleep;
129
130 if (u->tsched_watermark < min_wakeup)
131 u->tsched_watermark = min_wakeup;
132 }
133
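/* Split the currently requested sink latency (or the full hardware buffer
 * time if nothing specific was requested) into a sleep period and a
 * processing budget, based on the wakeup watermark. */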
134 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
135 pa_usec_t usec, wm;
136
137 pa_assert(sleep_usec);
138 pa_assert(process_usec);
139
140 pa_assert(u);
141
142 usec = pa_sink_get_requested_latency_within_thread(u->sink);
143
144 if (usec == (pa_usec_t) -1)
145 usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
146
147 /* pa_log_debug("hw buffer time: %u ms", (unsigned) (usec / PA_USEC_PER_MSEC)); */
148
149 wm = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
150
151 if (usec >= wm) {
152 *sleep_usec = usec - wm;
153 *process_usec = wm;
154 } else
155 *process_usec = *sleep_usec = usec / 2;
156
157 /* pa_log_debug("after watermark: %u ms", (unsigned) (*sleep_usec / PA_USEC_PER_MSEC)); */
158 }
159
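/* Attempt to recover the PCM from the error returned by an ALSA call. On
 * success the stream is treated as freshly started again; on failure -1 is
 * returned and the caller should bail out. */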
160 static int try_recover(struct userdata *u, const char *call, int err) {
161 pa_assert(u);
162 pa_assert(call);
163 pa_assert(err < 0);
164
165 pa_log_debug("%s: %s", call, snd_strerror(err));
166
167 pa_assert(err != -EAGAIN);
168
169 if (err == -EPIPE)
170 pa_log_debug("%s: Buffer underrun!", call);
171
172 if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0) {
173 u->first = TRUE;
174 u->since_start = 0;
175 return 0;
176 }
177
178 pa_log("%s: %s", call, snd_strerror(err));
179 return -1;
180 }
181
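/* Given the number of writable frames ALSA just reported, compute how many
 * bytes are still queued for playback. Zero means we underran; in
 * timer-scheduling mode the wakeup watermark is then doubled. */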
182 static size_t check_left_to_play(struct userdata *u, snd_pcm_sframes_t n) {
183 size_t left_to_play;
184
185 if ((size_t) n*u->frame_size < u->hwbuf_size)
186 left_to_play = u->hwbuf_size - ((size_t) n*u->frame_size);
187 else
188 left_to_play = 0;
189
190 if (left_to_play > 0) {
191 /* pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC); */
192 } else if (!u->first && !u->after_rewind) {
193 pa_log_info("Underrun!");
194
195 if (u->use_tsched) {
196 size_t old_watermark = u->tsched_watermark;
197
198 u->tsched_watermark *= 2;
199 fix_tsched_watermark(u);
200
201 if (old_watermark != u->tsched_watermark)
202 pa_log_notice("Increasing wakeup watermark to %0.2f ms",
203 (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
204 }
205 }
206
207 return left_to_play;
208 }
209
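/* Fill the hardware buffer in mmap access mode: render directly into the
 * DMA area between snd_pcm_mmap_begin() and snd_pcm_mmap_commit(). Returns
 * 1 if data was written, 0 if there was nothing to do, negative on an
 * unrecoverable error; *sleep_usec says how long we may sleep afterwards. */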
210 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
211 int work_done = 0;
212 pa_usec_t max_sleep_usec = 0, process_usec = 0;
213 size_t left_to_play;
214
215 pa_assert(u);
216 pa_sink_assert_ref(u->sink);
217
218 if (u->use_tsched)
219 hw_sleep_time(u, &max_sleep_usec, &process_usec);
220
221 for (;;) {
222 snd_pcm_sframes_t n;
223 int r;
224
225 snd_pcm_hwsync(u->pcm_handle);
226
227 /* First we determine how many samples are missing to fill the
228 * buffer up to 100% */
229
230 if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
231
232 if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
233 continue;
234
235 return r;
236 }
237
238 left_to_play = check_left_to_play(u, n);
239
240 if (u->use_tsched)
241
242 /* We won't fill up the playback buffer before at least
243 * half the sleep time is over because otherwise we might
244 * ask for more data from the clients than they expect. We
245 * need to guarantee that clients only have to keep around
246 * a single hw buffer length. */
247
248 if (!polled &&
249 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
250 break;
251
252 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {
253
254 if (polled && pa_log_ratelimit())
255 pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
256 "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
257 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");
258
259 break;
260 }
261
262 n -= u->hwbuf_unused_frames;
263
264 polled = FALSE;
265
266 /* pa_log_debug("Filling up"); */
267
268 for (;;) {
269 pa_memchunk chunk;
270 void *p;
271 int err;
272 const snd_pcm_channel_area_t *areas;
273 snd_pcm_uframes_t offset, frames = (snd_pcm_uframes_t) n;
274 snd_pcm_sframes_t sframes;
275
276 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
277
278 if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
279
280 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
281 continue;
282
283 return r;
284 }
285
286 /* Make sure that if these memblocks need to be copied they will fit into one slot */
287 if (frames > pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size)
288 frames = pa_mempool_block_size_max(u->sink->core->mempool)/u->frame_size;
289
290 /* Check these are multiples of 8 bit */
291 pa_assert((areas[0].first & 7) == 0);
292 pa_assert((areas[0].step & 7) == 0);
293
294 /* We assume a single interleaved memory buffer */
295 pa_assert((areas[0].first >> 3) == 0);
296 pa_assert((areas[0].step >> 3) == u->frame_size);
297
298 p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
299
300 chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
301 chunk.length = pa_memblock_get_length(chunk.memblock);
302 chunk.index = 0;
303
304 pa_sink_render_into_full(u->sink, &chunk);
305
306 /* FIXME: Maybe we can do something to keep this memory block
307 * a little bit longer around? */
308 pa_memblock_unref_fixed(chunk.memblock);
309
310 if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
311
312 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
313 continue;
314
315 return r;
316 }
317
318 work_done = 1;
319
320 u->frame_index += (int64_t) frames;
321 u->since_start += frames * u->frame_size;
322
323 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
324
325 if (frames >= (snd_pcm_uframes_t) n)
326 break;
327
328 n -= (snd_pcm_sframes_t) frames;
329 }
330 }
331
332 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
333 return work_done;
334 }
335
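/* Same as mmap_write(), but for the classic read/write access mode: render
 * into an intermediate memchunk and hand it to ALSA via snd_pcm_writei(). */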
336 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
337 int work_done = 0;
338 pa_usec_t max_sleep_usec = 0, process_usec = 0;
339 size_t left_to_play;
340
341 pa_assert(u);
342 pa_sink_assert_ref(u->sink);
343
344 if (u->use_tsched)
345 hw_sleep_time(u, &max_sleep_usec, &process_usec);
346
347 for (;;) {
348 snd_pcm_sframes_t n;
349 int r;
350
351 snd_pcm_hwsync(u->pcm_handle);
352
353 if (PA_UNLIKELY((n = pa_alsa_safe_avail_update(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
354
355 if ((r = try_recover(u, "snd_pcm_avail_update", (int) n)) == 0)
356 continue;
357
358 return r;
359 }
360
361 left_to_play = check_left_to_play(u, n);
362
363 if (u->use_tsched)
364
365 /* We won't fill up the playback buffer before at least
366 * half the sleep time is over because otherwise we might
367 * ask for more data from the clients than they expect. We
368 * need to guarantee that clients only have to keep around
369 * a single hw buffer length. */
370
371 if (!polled &&
372 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
373 break;
374
375 if (PA_UNLIKELY(n <= u->hwbuf_unused_frames)) {
376
377 if (polled && pa_log_ratelimit())
378 pa_log("ALSA woke us up to write new data to the device, but there was actually nothing to write! "
379 "Most likely this is an ALSA driver bug. Please report this issue to the ALSA developers. "
380 "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail_update() returned 0.");
381
382 break;
383 }
384
385 n -= u->hwbuf_unused_frames;
386
387 polled = FALSE;
388
389 for (;;) {
390 snd_pcm_sframes_t frames;
391 void *p;
392
393 /* pa_log_debug("%lu frames to write", (unsigned long) frames); */
394
395 if (u->memchunk.length <= 0)
396 pa_sink_render(u->sink, (size_t) n * u->frame_size, &u->memchunk);
397
398 pa_assert(u->memchunk.length > 0);
399
400 frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
401
402 if (frames > n)
403 frames = n;
404
405 p = pa_memblock_acquire(u->memchunk.memblock);
406 frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
407 pa_memblock_release(u->memchunk.memblock);
408
409 pa_assert(frames != 0);
410
411 if (PA_UNLIKELY(frames < 0)) {
412
413 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
414 continue;
415
416 return r;
417 }
418
419 u->memchunk.index += (size_t) frames * u->frame_size;
420 u->memchunk.length -= (size_t) frames * u->frame_size;
421
422 if (u->memchunk.length <= 0) {
423 pa_memblock_unref(u->memchunk.memblock);
424 pa_memchunk_reset(&u->memchunk);
425 }
426
427 work_done = 1;
428
429 u->frame_index += frames;
430 u->since_start += (size_t) frames * u->frame_size;
431
432 /* pa_log_debug("wrote %lu frames", (unsigned long) frames); */
433
434 if (frames >= n)
435 break;
436
437 n -= frames;
438 }
439 }
440
441 *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) - process_usec;
442 return work_done;
443 }
444
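/* Feed the time smoother with a fresh sample: pair the current system time
 * with the playback position derived from the frames written so far minus
 * what ALSA reports as still queued (the delay). */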
445 static void update_smoother(struct userdata *u) {
446 snd_pcm_sframes_t delay = 0;
447 int64_t frames;
448 int err;
449 pa_usec_t now1, now2;
450 /* struct timeval timestamp; */
451 snd_pcm_status_t *status;
452
453 snd_pcm_status_alloca(&status);
454
455 pa_assert(u);
456 pa_assert(u->pcm_handle);
457
458 /* Let's update the time smoother */
459
460 snd_pcm_hwsync(u->pcm_handle);
461 snd_pcm_avail_update(u->pcm_handle);
462
463 /* if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0)) { */
464 /* pa_log("Failed to query DSP status data: %s", snd_strerror(err)); */
465 /* return; */
466 /* } */
467
468 /* delay = snd_pcm_status_get_delay(status); */
469
470 if (PA_UNLIKELY((err = snd_pcm_delay(u->pcm_handle, &delay)) < 0)) {
471 pa_log("Failed to query DSP status data: %s", snd_strerror(err));
472 return;
473 }
474
475 frames = u->frame_index - delay;
476
477 /* pa_log_debug("frame_index = %llu, delay = %llu, p = %llu", (unsigned long long) u->frame_index, (unsigned long long) delay, (unsigned long long) frames); */
478
479 /* snd_pcm_status_get_tstamp(status, &timestamp); */
480 /* pa_rtclock_from_wallclock(&timestamp); */
481 /* now1 = pa_timeval_load(&timestamp); */
482
483 now1 = pa_rtclock_usec();
484 now2 = pa_bytes_to_usec((uint64_t) frames * u->frame_size, &u->sink->sample_spec);
485 pa_smoother_put(u->smoother, now1, now2);
486 }
487
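/* Estimate the current latency: the difference between what we have handed
 * to ALSA and what the smoothed clock says has been played, plus whatever
 * still sits in our partially written memchunk. */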
488 static pa_usec_t sink_get_latency(struct userdata *u) {
489 pa_usec_t r = 0;
490 int64_t delay;
491 pa_usec_t now1, now2;
492
493 pa_assert(u);
494
495 now1 = pa_rtclock_usec();
496 now2 = pa_smoother_get(u->smoother, now1);
497
498 delay = (int64_t) pa_bytes_to_usec((uint64_t) u->frame_index * u->frame_size, &u->sink->sample_spec) - (int64_t) now2;
499
500 if (delay > 0)
501 r = (pa_usec_t) delay;
502
503 if (u->memchunk.memblock)
504 r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
505
506 return r;
507 }
508
509 static int build_pollfd(struct userdata *u) {
510 pa_assert(u);
511 pa_assert(u->pcm_handle);
512
513 if (u->alsa_rtpoll_item)
514 pa_rtpoll_item_free(u->alsa_rtpoll_item);
515
516 if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
517 return -1;
518
519 return 0;
520 }
521
522 static int suspend(struct userdata *u) {
523 pa_assert(u);
524 pa_assert(u->pcm_handle);
525
526 pa_smoother_pause(u->smoother, pa_rtclock_usec());
527
528 /* Let's suspend -- we don't call snd_pcm_drain() here since that might
529 * take awfully long with our long buffer sizes today. */
530 snd_pcm_close(u->pcm_handle);
531 u->pcm_handle = NULL;
532
533 if (u->alsa_rtpoll_item) {
534 pa_rtpoll_item_free(u->alsa_rtpoll_item);
535 u->alsa_rtpoll_item = NULL;
536 }
537
538 pa_log_info("Device suspended...");
539
540 return 0;
541 }
542
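/* Recompute the ALSA software parameters: how many frames of the hardware
 * buffer we leave unused so the requested latency is honoured, and the
 * avail_min threshold that determines when ALSA wakes us up. Also updates
 * the sink's maximum request size accordingly. */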
543 static int update_sw_params(struct userdata *u) {
544 snd_pcm_uframes_t avail_min;
545 int err;
546
547 pa_assert(u);
548
549 /* Use the full buffer if no one asked us for anything specific */
550 u->hwbuf_unused_frames = 0;
551
552 if (u->use_tsched) {
553 pa_usec_t latency;
554
555 if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
556 size_t b;
557
558 pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
559
560 b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
561
562 /* We need at least one sample in our buffer */
563
564 if (PA_UNLIKELY(b < u->frame_size))
565 b = u->frame_size;
566
567 u->hwbuf_unused_frames = (snd_pcm_sframes_t)
568 (PA_LIKELY(b < u->hwbuf_size) ?
569 ((u->hwbuf_size - b) / u->frame_size) : 0);
570 }
571
572 fix_tsched_watermark(u);
573 }
574
575 pa_log_debug("hwbuf_unused_frames=%lu", (unsigned long) u->hwbuf_unused_frames);
576
577 /* We need at least one frame in the used part of the buffer */
578 avail_min = (snd_pcm_uframes_t) u->hwbuf_unused_frames + 1;
579
580 if (u->use_tsched) {
581 pa_usec_t sleep_usec, process_usec;
582
583 hw_sleep_time(u, &sleep_usec, &process_usec);
584 avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
585 }
586
587 pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
588
589 if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min)) < 0) {
590 pa_log("Failed to set software parameters: %s", snd_strerror(err));
591 return err;
592 }
593
594 pa_sink_set_max_request(u->sink, u->hwbuf_size - (size_t) u->hwbuf_unused_frames * u->frame_size);
595
596 return 0;
597 }
598
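/* Reopen and reconfigure the PCM device after a suspend. Resuming only
 * succeeds if the original access mode, sample spec and fragment settings
 * can be restored. */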
599 static int unsuspend(struct userdata *u) {
600 pa_sample_spec ss;
601 int err;
602 pa_bool_t b, d;
603 unsigned nfrags;
604 snd_pcm_uframes_t period_size;
605
606 pa_assert(u);
607 pa_assert(!u->pcm_handle);
608
609 pa_log_info("Trying resume...");
610
611 snd_config_update_free_global();
612 if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK,
613 /*SND_PCM_NONBLOCK|*/
614 SND_PCM_NO_AUTO_RESAMPLE|
615 SND_PCM_NO_AUTO_CHANNELS|
616 SND_PCM_NO_AUTO_FORMAT)) < 0) {
617 pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
618 goto fail;
619 }
620
621 ss = u->sink->sample_spec;
622 nfrags = u->nfragments;
623 period_size = u->fragment_size / u->frame_size;
624 b = u->use_mmap;
625 d = u->use_tsched;
626
627 if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, u->hwbuf_size / u->frame_size, &b, &d, TRUE)) < 0) {
628 pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
629 goto fail;
630 }
631
632 if (b != u->use_mmap || d != u->use_tsched) {
633 pa_log_warn("Resume failed, couldn't get original access mode.");
634 goto fail;
635 }
636
637 if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
638 pa_log_warn("Resume failed, couldn't restore original sample settings.");
639 goto fail;
640 }
641
642 if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
643 pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu*%lu, New %lu*%lu)",
644 (unsigned long) u->nfragments, (unsigned long) u->fragment_size,
645 (unsigned long) nfrags, period_size * u->frame_size);
646 goto fail;
647 }
648
649 if (update_sw_params(u) < 0)
650 goto fail;
651
652 if (build_pollfd(u) < 0)
653 goto fail;
654
655 /* FIXME: We need to reload the volume somehow */
656
657 u->first = TRUE;
658 u->since_start = 0;
659
660 pa_log_info("Resumed successfully...");
661
662 return 0;
663
664 fail:
665 if (u->pcm_handle) {
666 snd_pcm_close(u->pcm_handle);
667 u->pcm_handle = NULL;
668 }
669
670 return -1;
671 }
672
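/* Handle messages sent to the sink from the main thread: report the current
 * latency and suspend/resume the device on state changes. */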
673 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
674 struct userdata *u = PA_SINK(o)->userdata;
675
676 switch (code) {
677
678 case PA_SINK_MESSAGE_GET_LATENCY: {
679 pa_usec_t r = 0;
680
681 if (u->pcm_handle)
682 r = sink_get_latency(u);
683
684 *((pa_usec_t*) data) = r;
685
686 return 0;
687 }
688
689 case PA_SINK_MESSAGE_SET_STATE:
690
691 switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
692
693 case PA_SINK_SUSPENDED:
694 pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
695
696 if (suspend(u) < 0)
697 return -1;
698
699 break;
700
701 case PA_SINK_IDLE:
702 case PA_SINK_RUNNING:
703
704 if (u->sink->thread_info.state == PA_SINK_INIT) {
705 if (build_pollfd(u) < 0)
706 return -1;
707 }
708
709 if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
710 if (unsuspend(u) < 0)
711 return -1;
712 }
713
714 break;
715
716 case PA_SINK_UNLINKED:
717 case PA_SINK_INIT:
718 case PA_SINK_INVALID_STATE:
719 ;
720 }
721
722 break;
723 }
724
725 return pa_sink_process_msg(o, code, data, offset, chunk);
726 }
727
728 static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
729 struct userdata *u = snd_mixer_elem_get_callback_private(elem);
730
731 pa_assert(u);
732 pa_assert(u->mixer_handle);
733
734 if (mask == SND_CTL_EVENT_MASK_REMOVE)
735 return 0;
736
737 if (mask & SND_CTL_EVENT_MASK_VALUE) {
738 pa_sink_get_volume(u->sink, TRUE);
739 pa_sink_get_mute(u->sink, TRUE);
740 }
741
742 return 0;
743 }
744
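/* Map between the mixer's raw integer volume range [hw_volume_min,
 * hw_volume_max] and PulseAudio's linear pa_volume_t scale. */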
745 static pa_volume_t from_alsa_volume(struct userdata *u, long alsa_vol) {
746
747 return (pa_volume_t) round(((double) (alsa_vol - u->hw_volume_min) * PA_VOLUME_NORM) /
748 (double) (u->hw_volume_max - u->hw_volume_min));
749 }
750
751 static long to_alsa_volume(struct userdata *u, pa_volume_t vol) {
752 long alsa_vol;
753
754 alsa_vol = (long) round(((double) vol * (double) (u->hw_volume_max - u->hw_volume_min))
755 / PA_VOLUME_NORM) + u->hw_volume_min;
756
757 return PA_CLAMP_UNLIKELY(alsa_vol, u->hw_volume_min, u->hw_volume_max);
758 }
759
760 static void sink_get_volume_cb(pa_sink *s) {
761 struct userdata *u = s->userdata;
762 int err;
763 unsigned i;
764 pa_cvolume r;
765 char t[PA_CVOLUME_SNPRINT_MAX];
766
767 pa_assert(u);
768 pa_assert(u->mixer_elem);
769
770 if (u->mixer_seperate_channels) {
771
772 r.channels = s->sample_spec.channels;
773
774 for (i = 0; i < s->sample_spec.channels; i++) {
775 long alsa_vol;
776
777 if (u->hw_dB_supported) {
778
779 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
780 goto fail;
781
782 #ifdef HAVE_VALGRIND_MEMCHECK_H
783 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
784 #endif
785
786 r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
787 } else {
788
789 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
790 goto fail;
791
792 r.values[i] = from_alsa_volume(u, alsa_vol);
793 }
794 }
795
796 } else {
797 long alsa_vol;
798
799 if (u->hw_dB_supported) {
800
801 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
802 goto fail;
803
804 #ifdef HAVE_VALGRIND_MEMCHECK_H
805 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
806 #endif
807
808 pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));
809
810 } else {
811
812 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
813 goto fail;
814
815 pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
816 }
817 }
818
819 pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &r));
820
821 if (!pa_cvolume_equal(&u->hardware_volume, &r)) {
822
823 s->virtual_volume = u->hardware_volume = r;
824
825 if (u->hw_dB_supported) {
826 pa_cvolume reset;
827
828 /* Hmm, so the hardware volume changed, let's reset our software volume */
829 pa_cvolume_reset(&reset, s->sample_spec.channels);
830 pa_sink_set_soft_volume(s, &reset);
831 }
832 }
833
834 return;
835
836 fail:
837 pa_log_error("Unable to read volume: %s", snd_strerror(err));
838 }
839
840 static void sink_set_volume_cb(pa_sink *s) {
841 struct userdata *u = s->userdata;
842 int err;
843 unsigned i;
844 pa_cvolume r;
845
846 pa_assert(u);
847 pa_assert(u->mixer_elem);
848
849 if (u->mixer_seperate_channels) {
850
851 r.channels = s->sample_spec.channels;
852
853 for (i = 0; i < s->sample_spec.channels; i++) {
854 long alsa_vol;
855 pa_volume_t vol;
856
857 vol = s->virtual_volume.values[i];
858
859 if (u->hw_dB_supported) {
860
861 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
862 alsa_vol += u->hw_dB_max;
863 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
864
865 if ((err = snd_mixer_selem_set_playback_dB(u->mixer_elem, u->mixer_map[i], alsa_vol, 1)) < 0)
866 goto fail;
867
868 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
869 goto fail;
870
871 #ifdef HAVE_VALGRIND_MEMCHECK_H
872 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
873 #endif
874
875 r.values[i] = pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0);
876
877 } else {
878 alsa_vol = to_alsa_volume(u, vol);
879
880 if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
881 goto fail;
882
883 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &alsa_vol)) < 0)
884 goto fail;
885
886 r.values[i] = from_alsa_volume(u, alsa_vol);
887 }
888 }
889
890 } else {
891 pa_volume_t vol;
892 long alsa_vol;
893
894 vol = pa_cvolume_max(&s->virtual_volume);
895
896 if (u->hw_dB_supported) {
897 alsa_vol = (long) (pa_sw_volume_to_dB(vol) * 100);
898 alsa_vol += u->hw_dB_max;
899 alsa_vol = PA_CLAMP_UNLIKELY(alsa_vol, u->hw_dB_min, u->hw_dB_max);
900
901 if ((err = snd_mixer_selem_set_playback_dB_all(u->mixer_elem, alsa_vol, 1)) < 0)
902 goto fail;
903
904 if ((err = snd_mixer_selem_get_playback_dB(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
905 goto fail;
906
907 #ifdef HAVE_VALGRIND_MEMCHECK_H
908 VALGRIND_MAKE_MEM_DEFINED(&alsa_vol, sizeof(alsa_vol));
909 #endif
910
911 pa_cvolume_set(&r, s->sample_spec.channels, pa_sw_volume_from_dB((double) (alsa_vol - u->hw_dB_max) / 100.0));
912
913 } else {
914 alsa_vol = to_alsa_volume(u, vol);
915
916 if ((err = snd_mixer_selem_set_playback_volume_all(u->mixer_elem, alsa_vol)) < 0)
917 goto fail;
918
919 if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, SND_MIXER_SCHN_MONO, &alsa_vol)) < 0)
920 goto fail;
921
922 pa_cvolume_set(&r, s->sample_spec.channels, from_alsa_volume(u, alsa_vol));
923 }
924 }
925
926 u->hardware_volume = r;
927
928 if (u->hw_dB_supported) {
929 char t[PA_CVOLUME_SNPRINT_MAX];
930
931 /* Match exactly what the user requested by software */
932 pa_sw_cvolume_divide(&s->soft_volume, &s->virtual_volume, &u->hardware_volume);
933
934 pa_log_debug("Requested volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->virtual_volume));
935 pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(t, sizeof(t), &u->hardware_volume));
936 pa_log_debug("Calculated software volume: %s", pa_cvolume_snprint(t, sizeof(t), &s->soft_volume));
937
938 } else
939
940 /* We can't match exactly what the user requested, hence let's
941 * at least tell the user about it */
942
943 s->virtual_volume = r;
944
945 return;
946
947 fail:
948 pa_log_error("Unable to set volume: %s", snd_strerror(err));
949 }
950
951 static void sink_get_mute_cb(pa_sink *s) {
952 struct userdata *u = s->userdata;
953 int err, sw;
954
955 pa_assert(u);
956 pa_assert(u->mixer_elem);
957
958 if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
959 pa_log_error("Unable to get switch: %s", snd_strerror(err));
960 return;
961 }
962
963 s->muted = !sw;
964 }
965
966 static void sink_set_mute_cb(pa_sink *s) {
967 struct userdata *u = s->userdata;
968 int err;
969
970 pa_assert(u);
971 pa_assert(u->mixer_elem);
972
973 if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
974 pa_log_error("Unable to set switch: %s", snd_strerror(err));
975 return;
976 }
977 }
978
979 static void sink_update_requested_latency_cb(pa_sink *s) {
980 struct userdata *u = s->userdata;
981 snd_pcm_sframes_t before;
982 pa_assert(u);
983
984 if (!u->pcm_handle)
985 return;
986
987 before = u->hwbuf_unused_frames;
988 update_sw_params(u);
989
990 /* Let's check whether we now use only a smaller part of the
991 buffer than before. If so, we need to make sure that subsequent
992 rewinds are relative to the new maximum fill level and not to the
993 current fill level. Thus, let's do a full rewind once, to clear
994 things up. */
995
996 if (u->hwbuf_unused_frames > before) {
997 pa_log_debug("Requesting rewind due to latency change.");
998 pa_sink_request_rewind(s, (size_t) -1);
999 }
1000 }
1001
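/* Rewind the playback pointer as far as the sink asked for, but never into
 * the part of the buffer that is unwritten or protected by the wakeup
 * watermark, and tell the sink how many bytes were actually rewound. */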
1002 static int process_rewind(struct userdata *u) {
1003 snd_pcm_sframes_t unused;
1004 size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1005 pa_assert(u);
1006
1007 /* Figure out how much we shall rewind and reset the counter */
1008 rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1009 u->sink->thread_info.rewind_nbytes = 0;
1010
1011 if (rewind_nbytes <= 0)
1012 goto finish;
1013
1014 pa_assert(rewind_nbytes > 0);
1015 pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1016
1017 snd_pcm_hwsync(u->pcm_handle);
1018 if ((unused = snd_pcm_avail_update(u->pcm_handle)) < 0) {
1019 pa_log("snd_pcm_avail_update() failed: %s", snd_strerror((int) unused));
1020 return -1;
1021 }
1022
1023 unused_nbytes = u->tsched_watermark + (size_t) unused * u->frame_size;
1024
1025 if (u->hwbuf_size > unused_nbytes)
1026 limit_nbytes = u->hwbuf_size - unused_nbytes;
1027 else
1028 limit_nbytes = 0;
1029
1030 if (rewind_nbytes > limit_nbytes)
1031 rewind_nbytes = limit_nbytes;
1032
1033 if (rewind_nbytes > 0) {
1034 snd_pcm_sframes_t in_frames, out_frames;
1035
1036 pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1037
1038 in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1039 pa_log_debug("before: %lu", (unsigned long) in_frames);
1040 if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1041 pa_log("snd_pcm_rewind() failed: %s", snd_strerror((int) out_frames));
1042 return -1;
1043 }
1044 pa_log_debug("after: %lu", (unsigned long) out_frames);
1045
1046 rewind_nbytes = (size_t) out_frames * u->frame_size;
1047
1048 if (rewind_nbytes <= 0)
1049 pa_log_info("Tried rewind, but was apparently not possible.");
1050 else {
1051 u->frame_index -= out_frames;
1052 pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1053 pa_sink_process_rewind(u->sink, rewind_nbytes);
1054
1055 u->after_rewind = TRUE;
1056 return 0;
1057 }
1058 } else
1059 pa_log_debug("Mhmm, actually there is nothing to rewind.");
1060
1061 finish:
1062
1063 pa_sink_process_rewind(u->sink, 0);
1064
1065 return 0;
1066
1067 }
1068
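/* The real-time I/O thread: renders audio into the device, programs the
 * wakeup timer when timer-based scheduling is used, and handles poll events
 * and error recovery reported by ALSA. */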
1069 static void thread_func(void *userdata) {
1070 struct userdata *u = userdata;
1071 unsigned short revents = 0;
1072
1073 pa_assert(u);
1074
1075 pa_log_debug("Thread starting up");
1076
1077 if (u->core->realtime_scheduling)
1078 pa_make_realtime(u->core->realtime_priority);
1079
1080 pa_thread_mq_install(&u->thread_mq);
1081 pa_rtpoll_install(u->rtpoll);
1082
1083 for (;;) {
1084 int ret;
1085
1086 /* pa_log_debug("loop"); */
1087
1088 /* Render some data and write it to the dsp */
1089 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1090 int work_done;
1091 pa_usec_t sleep_usec = 0;
1092
1093 if (u->sink->thread_info.rewind_requested)
1094 if (process_rewind(u) < 0)
1095 goto fail;
1096
1097 if (u->use_mmap)
1098 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
1099 else
1100 work_done = unix_write(u, &sleep_usec, revents & POLLOUT);
1101
1102 if (work_done < 0)
1103 goto fail;
1104
1105 /* pa_log_debug("work_done = %i", work_done); */
1106
1107 if (work_done) {
1108
1109 if (u->first) {
1110 pa_log_info("Starting playback.");
1111 snd_pcm_start(u->pcm_handle);
1112
1113 pa_smoother_resume(u->smoother, pa_rtclock_usec());
1114 }
1115
1116 update_smoother(u);
1117 }
1118
1119 if (u->use_tsched) {
1120 pa_usec_t cusec;
1121
1122 if (u->since_start <= u->hwbuf_size) {
1123
1124 /* USB devices on ALSA seem to hit a buffer
1125 * underrun during the first iterations much
1126 * quicker than we calculate here, probably due to
1127 * the transport latency. To accommodate for that
1128 * we artificially decrease the sleep time until
1129 * we have filled the buffer at least once
1130 * completely.*/
1131
1132 /*pa_log_debug("Cutting sleep time for the initial iterations by half.");*/
1133 sleep_usec /= 2;
1134 }
1135
1136 /* OK, the playback buffer is now full, let's
1137 * calculate when to wake up next */
1138 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1139
1140 /* Convert from the sound card time domain to the
1141 * system time domain */
1142 cusec = pa_smoother_translate(u->smoother, pa_rtclock_usec(), sleep_usec);
1143
1144 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1145
1146 /* We don't trust the conversion, so we wake up whatever comes first */
1147 pa_rtpoll_set_timer_relative(u->rtpoll, PA_MIN(sleep_usec, cusec));
1148 }
1149
1150 u->first = FALSE;
1151 u->after_rewind = FALSE;
1152
1153 } else if (u->use_tsched)
1154
1155 /* OK, we're in an invalid state, let's disable our timers */
1156 pa_rtpoll_set_timer_disabled(u->rtpoll);
1157
1158 /* Hmm, nothing to do. Let's sleep */
1159 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1160 goto fail;
1161
1162 if (ret == 0)
1163 goto finish;
1164
1165 /* Tell ALSA about this and process its response */
1166 if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1167 struct pollfd *pollfd;
1168 int err;
1169 unsigned n;
1170
1171 pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1172
1173 if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1174 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
1175 goto fail;
1176 }
1177
1178 if (revents & (POLLIN|POLLERR|POLLNVAL|POLLHUP|POLLPRI)) {
1179 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1180 goto fail;
1181
1182 u->first = TRUE;
1183 u->since_start = 0;
1184 }
1185
1186 if (revents && u->use_tsched && pa_log_ratelimit())
1187 pa_log_debug("Wakeup from ALSA!%s%s", (revents & POLLIN) ? " INPUT" : "", (revents & POLLOUT) ? " OUTPUT" : "");
1188 } else
1189 revents = 0;
1190 }
1191
1192 fail:
1193 /* If this was not a regular exit from the loop we have to continue
1194 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1195 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1196 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1197
1198 finish:
1199 pa_log_debug("Thread shutting down");
1200 }
1201
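/* Pick the sink name: an explicit sink_name=/name= module argument wins,
 * otherwise the name is derived from the device id or device name as
 * "alsa_output.<device>". */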
1202 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name) {
1203 const char *n;
1204 char *t;
1205
1206 pa_assert(data);
1207 pa_assert(ma);
1208 pa_assert(device_name);
1209
1210 if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
1211 pa_sink_new_data_set_name(data, n);
1212 data->namereg_fail = TRUE;
1213 return;
1214 }
1215
1216 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1217 data->namereg_fail = TRUE;
1218 else {
1219 n = device_id ? device_id : device_name;
1220 data->namereg_fail = FALSE;
1221 }
1222
1223 t = pa_sprintf_malloc("alsa_output.%s", n);
1224 pa_sink_new_data_set_name(data, t);
1225 pa_xfree(t);
1226 }
1227
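/* Create a new ALSA sink: parse the module arguments, open and configure
 * the PCM device (preferring mmap and timer-based scheduling), probe the
 * mixer for hardware volume/mute control, create the pa_sink and spawn the
 * I/O thread. */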
1228 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, const pa_alsa_profile_info *profile) {
1229
1230 struct userdata *u = NULL;
1231 const char *dev_id = NULL;
1232 pa_sample_spec ss;
1233 pa_channel_map map;
1234 uint32_t nfrags, hwbuf_size, frag_size, tsched_size, tsched_watermark;
1235 snd_pcm_uframes_t period_frames, tsched_frames;
1236 size_t frame_size;
1237 snd_pcm_info_t *pcm_info = NULL;
1238 int err;
1239 pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE;
1240 pa_usec_t usec;
1241 pa_sink_new_data data;
1242
1243 snd_pcm_info_alloca(&pcm_info);
1244
1245 pa_assert(m);
1246 pa_assert(ma);
1247
1248 ss = m->core->default_sample_spec;
1249 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1250 pa_log("Failed to parse sample specification and channel map");
1251 goto fail;
1252 }
1253
1254 frame_size = pa_frame_size(&ss);
1255
1256 nfrags = m->core->default_n_fragments;
1257 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1258 if (frag_size <= 0)
1259 frag_size = (uint32_t) frame_size;
1260 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1261 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1262
1263 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1264 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1265 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1266 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1267 pa_log("Failed to parse buffer metrics");
1268 goto fail;
1269 }
1270
1271 hwbuf_size = frag_size * nfrags;
1272 period_frames = frag_size/frame_size;
1273 tsched_frames = tsched_size/frame_size;
1274
1275 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1276 pa_log("Failed to parse mmap argument.");
1277 goto fail;
1278 }
1279
1280 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1281 pa_log("Failed to parse tsched argument.");
1282 goto fail;
1283 }
1284
1285 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1286 pa_log("Failed to parse ignore_dB argument.");
1287 goto fail;
1288 }
1289
1290 if (use_tsched && !pa_rtclock_hrtimer()) {
1291 pa_log_notice("Disabling timer-based scheduling because high-resolution timers are not available from the kernel.");
1292 use_tsched = FALSE;
1293 }
1294
1295 u = pa_xnew0(struct userdata, 1);
1296 u->core = m->core;
1297 u->module = m;
1298 u->use_mmap = use_mmap;
1299 u->use_tsched = use_tsched;
1300 u->first = TRUE;
1301 u->since_start = 0;
1302 u->after_rewind = FALSE;
1303 u->rtpoll = pa_rtpoll_new();
1304 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1305 u->alsa_rtpoll_item = NULL;
1306
1307 u->smoother = pa_smoother_new(DEFAULT_TSCHED_BUFFER_USEC*2, DEFAULT_TSCHED_BUFFER_USEC*2, TRUE, 5);
1308 usec = pa_rtclock_usec();
1309 pa_smoother_set_time_offset(u->smoother, usec);
1310 pa_smoother_pause(u->smoother, usec);
1311
1312 b = use_mmap;
1313 d = use_tsched;
1314
1315 if (profile) {
1316
1317 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1318 pa_log("device_id= not set");
1319 goto fail;
1320 }
1321
1322 if (!(u->pcm_handle = pa_alsa_open_by_device_id_profile(
1323 dev_id,
1324 &u->device_name,
1325 &ss, &map,
1326 SND_PCM_STREAM_PLAYBACK,
1327 &nfrags, &period_frames, tsched_frames,
1328 &b, &d, profile)))
1329
1330 goto fail;
1331
1332 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1333
1334 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1335 dev_id,
1336 &u->device_name,
1337 &ss, &map,
1338 SND_PCM_STREAM_PLAYBACK,
1339 &nfrags, &period_frames, tsched_frames,
1340 &b, &d, &profile)))
1341
1342 goto fail;
1343
1344 } else {
1345
1346 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1347 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1348 &u->device_name,
1349 &ss, &map,
1350 SND_PCM_STREAM_PLAYBACK,
1351 &nfrags, &period_frames, tsched_frames,
1352 &b, &d, FALSE)))
1353 goto fail;
1354
1355 }
1356
1357 pa_assert(u->device_name);
1358 pa_log_info("Successfully opened device %s.", u->device_name);
1359
1360 if (profile)
1361 pa_log_info("Selected configuration '%s' (%s).", profile->description, profile->name);
1362
1363 if (use_mmap && !b) {
1364 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1365 u->use_mmap = use_mmap = FALSE;
1366 }
1367
1368 if (use_tsched && (!b || !d)) {
1369 pa_log_info("Cannot enabled timer-based scheduling, falling back to sound IRQ scheduling.");
1370 u->use_tsched = use_tsched = FALSE;
1371 }
1372
1373 if (u->use_mmap)
1374 pa_log_info("Successfully enabled mmap() mode.");
1375
1376 if (u->use_tsched)
1377 pa_log_info("Successfully enabled timer-based scheduling mode.");
1378
1379 if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
1380 pa_log("Error fetching PCM info: %s", snd_strerror(err));
1381 goto fail;
1382 }
1383
1384 /* ALSA might tweak the sample spec, so recalculate the frame size */
1385 frame_size = pa_frame_size(&ss);
1386
1387 if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
1388 pa_log_warn("Error opening mixer: %s", snd_strerror(err));
1389 else {
1390 pa_bool_t found = FALSE;
1391
1392 if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
1393 found = TRUE;
1394 else {
1395 snd_pcm_info_t *info;
1396
1397 snd_pcm_info_alloca(&info);
1398
1399 if (snd_pcm_info(u->pcm_handle, info) >= 0) {
1400 char *md;
1401 int card_idx;
1402
1403 if ((card_idx = snd_pcm_info_get_card(info)) >= 0) {
1404
1405 md = pa_sprintf_malloc("hw:%i", card_idx);
1406
1407 if (strcmp(u->device_name, md))
1408 if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
1409 found = TRUE;
1410 pa_xfree(md);
1411 }
1412 }
1413 }
1414
1415 if (found)
1416 if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM", TRUE)))
1417 found = FALSE;
1418
1419 if (!found) {
1420 snd_mixer_close(u->mixer_handle);
1421 u->mixer_handle = NULL;
1422 }
1423 }
1424
1425 pa_sink_new_data_init(&data);
1426 data.driver = driver;
1427 data.module = m;
1428 data.card = card;
1429 set_sink_name(&data, ma, dev_id, u->device_name);
1430 pa_sink_new_data_set_sample_spec(&data, &ss);
1431 pa_sink_new_data_set_channel_map(&data, &map);
1432
1433 pa_alsa_init_proplist_pcm(m->core, data.proplist, pcm_info);
1434 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1435 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (period_frames * frame_size * nfrags));
1436 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1437 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1438
1439 if (profile) {
1440 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, profile->name);
1441 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, profile->description);
1442 }
1443
1444 u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE|PA_SINK_LATENCY);
1445 pa_sink_new_data_done(&data);
1446
1447 if (!u->sink) {
1448 pa_log("Failed to create sink object");
1449 goto fail;
1450 }
1451
1452 u->sink->parent.process_msg = sink_process_msg;
1453 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1454 u->sink->userdata = u;
1455
1456 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1457 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1458
1459 u->frame_size = frame_size;
1460 u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
1461 u->nfragments = nfrags;
1462 u->hwbuf_size = u->fragment_size * nfrags;
1463 u->hwbuf_unused_frames = 0;
1464 u->tsched_watermark = tsched_watermark;
1465 u->frame_index = 0;
1466 u->hw_dB_supported = FALSE;
1467 u->hw_dB_min = u->hw_dB_max = 0;
1468 u->hw_volume_min = u->hw_volume_max = 0;
1469 u->mixer_seperate_channels = FALSE;
1470 pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
1471
1472 if (use_tsched)
1473 fix_tsched_watermark(u);
1474
1475 u->sink->thread_info.max_rewind = use_tsched ? u->hwbuf_size : 0;
1476 u->sink->thread_info.max_request = u->hwbuf_size;
1477
1478 pa_sink_set_latency_range(u->sink,
1479 !use_tsched ? pa_bytes_to_usec(u->hwbuf_size, &ss) : (pa_usec_t) -1,
1480 pa_bytes_to_usec(u->hwbuf_size, &ss));
1481
1482 pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
1483 nfrags, (long unsigned) u->fragment_size,
1484 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1485
1486 if (use_tsched)
1487 pa_log_info("Time scheduling watermark is %0.2fms",
1488 (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1489
1490 if (update_sw_params(u) < 0)
1491 goto fail;
1492
1493 pa_memchunk_reset(&u->memchunk);
1494
1495 if (u->mixer_handle) {
1496 pa_assert(u->mixer_elem);
1497
1498 if (snd_mixer_selem_has_playback_volume(u->mixer_elem)) {
1499 pa_bool_t suitable = FALSE;
1500
1501 if (snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max) < 0)
1502 pa_log_info("Failed to get volume range. Falling back to software volume control.");
1503 else if (u->hw_volume_min >= u->hw_volume_max)
1504 pa_log_warn("Your kernel driver is broken: it reports a volume range from %li to %li which makes no sense.", u->hw_volume_min, u->hw_volume_max);
1505 else {
1506 pa_log_info("Volume ranges from %li to %li.", u->hw_volume_min, u->hw_volume_max);
1507 suitable = TRUE;
1508 }
1509
1510 if (suitable) {
1511 if (ignore_dB || snd_mixer_selem_get_playback_dB_range(u->mixer_elem, &u->hw_dB_min, &u->hw_dB_max) < 0)
1512 pa_log_info("Mixer doesn't support dB information or data is ignored.");
1513 else {
1514 #ifdef HAVE_VALGRIND_MEMCHECK_H
1515 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_min, sizeof(u->hw_dB_min));
1516 VALGRIND_MAKE_MEM_DEFINED(&u->hw_dB_max, sizeof(u->hw_dB_max));
1517 #endif
1518
1519 if (u->hw_dB_min >= u->hw_dB_max)
1520 pa_log_warn("Your kernel driver is broken: it reports a volume range from %0.2f dB to %0.2f dB which makes no sense.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1521 else {
1522 pa_log_info("Volume ranges from %0.2f dB to %0.2f dB.", (double) u->hw_dB_min/100.0, (double) u->hw_dB_max/100.0);
1523 u->hw_dB_supported = TRUE;
1524
1525 if (u->hw_dB_max > 0) {
1526 u->sink->base_volume = pa_sw_volume_from_dB(- (double) u->hw_dB_max/100.0);
1527 pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1528 } else
1529 pa_log_info("No particular base volume set, fixing to 0 dB");
1530 }
1531 }
1532
1533 if (!u->hw_dB_supported &&
1534 u->hw_volume_max - u->hw_volume_min < 3) {
1535
1536 pa_log_info("Device doesn't do dB volume and has less than 4 volume levels. Falling back to software volume control.");
1537 suitable = FALSE;
1538 }
1539 }
1540
1541 if (suitable) {
1542 u->mixer_seperate_channels = pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0;
1543
1544 u->sink->get_volume = sink_get_volume_cb;
1545 u->sink->set_volume = sink_set_volume_cb;
1546 u->sink->flags |= PA_SINK_HW_VOLUME_CTRL | (u->hw_dB_supported ? PA_SINK_DECIBEL_VOLUME : 0);
1547 pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->hw_dB_supported ? "supported" : "not supported");
1548
1549 if (!u->hw_dB_supported)
1550 u->sink->n_volume_steps = u->hw_volume_max - u->hw_volume_min + 1;
1551 } else
1552 pa_log_info("Using software volume control.");
1553 }
1554
1555 if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
1556 u->sink->get_mute = sink_get_mute_cb;
1557 u->sink->set_mute = sink_set_mute_cb;
1558 u->sink->flags |= PA_SINK_HW_MUTE_CTRL;
1559 } else
1560 pa_log_info("Using software mute control.");
1561
1562 u->mixer_fdl = pa_alsa_fdlist_new();
1563
1564 if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
1565 pa_log("Failed to initialize file descriptor monitoring");
1566 goto fail;
1567 }
1568
1569 snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
1570 snd_mixer_elem_set_callback_private(u->mixer_elem, u);
1571 } else
1572 u->mixer_fdl = NULL;
1573
1574 pa_alsa_dump(u->pcm_handle);
1575
1576 if (!(u->thread = pa_thread_new(thread_func, u))) {
1577 pa_log("Failed to create thread.");
1578 goto fail;
1579 }
1580
1581 /* Get initial mixer settings */
1582 if (data.volume_is_set) {
1583 if (u->sink->set_volume)
1584 u->sink->set_volume(u->sink);
1585 } else {
1586 if (u->sink->get_volume)
1587 u->sink->get_volume(u->sink);
1588 }
1589
1590 if (data.muted_is_set) {
1591 if (u->sink->set_mute)
1592 u->sink->set_mute(u->sink);
1593 } else {
1594 if (u->sink->get_mute)
1595 u->sink->get_mute(u->sink);
1596 }
1597
1598 pa_sink_put(u->sink);
1599
1600 return u->sink;
1601
1602 fail:
1603
1604 userdata_free(u);
1605
1606 return NULL;
1607 }
1608
1609 static void userdata_free(struct userdata *u) {
1610 pa_assert(u);
1611
1612 if (u->sink)
1613 pa_sink_unlink(u->sink);
1614
1615 if (u->thread) {
1616 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1617 pa_thread_free(u->thread);
1618 }
1619
1620 pa_thread_mq_done(&u->thread_mq);
1621
1622 if (u->sink)
1623 pa_sink_unref(u->sink);
1624
1625 if (u->memchunk.memblock)
1626 pa_memblock_unref(u->memchunk.memblock);
1627
1628 if (u->alsa_rtpoll_item)
1629 pa_rtpoll_item_free(u->alsa_rtpoll_item);
1630
1631 if (u->rtpoll)
1632 pa_rtpoll_free(u->rtpoll);
1633
1634 if (u->mixer_fdl)
1635 pa_alsa_fdlist_free(u->mixer_fdl);
1636
1637 if (u->mixer_handle)
1638 snd_mixer_close(u->mixer_handle);
1639
1640 if (u->pcm_handle) {
1641 snd_pcm_drop(u->pcm_handle);
1642 snd_pcm_close(u->pcm_handle);
1643 }
1644
1645 if (u->smoother)
1646 pa_smoother_free(u->smoother);
1647
1648 pa_xfree(u->device_name);
1649 pa_xfree(u);
1650 }
1651
1652 void pa_alsa_sink_free(pa_sink *s) {
1653 struct userdata *u;
1654
1655 pa_sink_assert_ref(s);
1656 pa_assert_se(u = s->userdata);
1657
1658 userdata_free(u);
1659 }