AudioPlayer constructor

AudioPlayer({
  String? userAgent,
  bool handleInterruptions = true,
  bool androidApplyAudioAttributes = true,
  bool handleAudioSessionActivation = true,
  AudioLoadConfiguration? audioLoadConfiguration,
  AudioPipeline? audioPipeline,
  bool androidOffloadSchedulingEnabled = false,
  bool useProxyForRequestHeaders = true,
})
Creates an AudioPlayer.

Apps requesting remote URLs should set the userAgent parameter, which
will be set as the user-agent header on all requests (except on web,
where the browser's user agent is used) to identify the client. If
unspecified, a platform-specific default will be supplied.
Request headers, including user-agent, are sent by default via a local
HTTP proxy, which requires non-HTTPS support to be enabled (see the
README page for setup instructions). Alternatively, you can set
useProxyForRequestHeaders to false to allow supported platforms to send
the request headers directly, without use of the proxy. On iOS/macOS,
this will use AVURLAssetHTTPUserAgentKey on iOS 16 and above and
macOS 13 and above, provided user-agent is the only header used;
otherwise, the AVURLAssetHTTPHeaderFieldsKey key will be used. On
Android, this will use ExoPlayer's setUserAgent and
setDefaultRequestProperties. For Linux/Windows federated platform
implementations, refer to that implementation's documentation for
details of its header support.
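For example, to identify your app while sending headers directly on
supported platforms (a sketch; the user agent string and stream URL are
illustrative):

final player = AudioPlayer(
  // Identify this client to the remote server.
  userAgent: 'myradioapp/1.0 (https://myradioapp.example)',
  // Send headers directly rather than via the local proxy.
  useProxyForRequestHeaders: false,
);
await player.setUrl('https://example.com/stream.mp3');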
The player will automatically pause/duck and resume/unduck when audio
interruptions occur (e.g. a phone call) or when headphones are
unplugged. If you wish to handle audio interruptions manually, set
handleInterruptions to false and interface directly with the audio
session via the audio_session package. If you do not wish just_audio to
automatically activate the audio session when playing audio, set
handleAudioSessionActivation to false.
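For example, a minimal sketch of handling interruptions manually with
the audio_session package (the policy shown, pausing on every
interruption and resuming only after a pause-type interruption, is an
assumption):

final player = AudioPlayer(handleInterruptions: false);
final session = await AudioSession.instance;
session.interruptionEventStream.listen((event) {
  if (event.begin) {
    // An interruption (e.g. a phone call) has started.
    player.pause();
  } else if (event.type == AudioInterruptionType.pause) {
    // The interruption has ended; resume playback.
    player.play();
  }
});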
If you do not want just_audio to respect the global
AndroidAudioAttributes configured by audio_session, set
androidApplyAudioAttributes to false.
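In that case you can apply your own attributes via
setAndroidAudioAttributes (a sketch; the attribute values chosen here
are illustrative):

final player = AudioPlayer(androidApplyAudioAttributes: false);
await player.setAndroidAudioAttributes(AndroidAudioAttributes(
  contentType: AndroidAudioContentType.music,
  usage: AndroidAudioUsage.media,
));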
The default audio loading and buffering behaviour can be configured via
the audioLoadConfiguration parameter.
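For example, to request larger buffers (a sketch; the durations are
illustrative, and parameter availability may vary by plugin version):

final player = AudioPlayer(
  audioLoadConfiguration: AudioLoadConfiguration(
    androidLoadControl: AndroidLoadControl(
      minBufferDuration: Duration(seconds: 20),
      maxBufferDuration: Duration(seconds: 60),
    ),
    darwinLoadControl: DarwinLoadControl(
      preferredForwardBufferDuration: Duration(seconds: 20),
    ),
  ),
);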
Implementation
AudioPlayer({
String? userAgent,
bool handleInterruptions = true,
bool androidApplyAudioAttributes = true,
bool handleAudioSessionActivation = true,
AudioLoadConfiguration? audioLoadConfiguration,
AudioPipeline? audioPipeline,
bool androidOffloadSchedulingEnabled = false,
bool useProxyForRequestHeaders = true,
}) : _id = _uuid.v4(),
_userAgent = userAgent,
_androidApplyAudioAttributes =
androidApplyAudioAttributes && _isAndroid(),
_handleAudioSessionActivation = handleAudioSessionActivation,
_audioLoadConfiguration = audioLoadConfiguration,
_audioPipeline = audioPipeline ?? AudioPipeline(),
_androidOffloadSchedulingEnabled = androidOffloadSchedulingEnabled,
_useProxyForRequestHeaders = useProxyForRequestHeaders {
_audioPipeline._setup(this);
if (_audioLoadConfiguration?.darwinLoadControl != null) {
_automaticallyWaitsToMinimizeStalling = _audioLoadConfiguration!
.darwinLoadControl!.automaticallyWaitsToMinimizeStalling;
}
_playbackEventSubject.add(_playbackEvent);
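    // Derive broadcast streams for individual properties from the raw
    // playback event stream.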
_processingStateSubject.addStream(playbackEventStream
.map((event) => event.processingState)
.distinct()
.handleError((Object err, StackTrace stackTrace) {/* noop */}));
_bufferedPositionSubject.addStream(playbackEventStream
.map((event) => event.bufferedPosition)
.distinct()
.handleError((Object err, StackTrace stackTrace) {/* noop */}));
_icyMetadataSubject.addStream(playbackEventStream
.map((event) => event.icyMetadata)
.distinct()
.handleError((Object err, StackTrace stackTrace) {/* noop */}));
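    // Compare consecutive playback events to detect position
    // discontinuities (auto-advance to the next item, or looping back
    // in LoopMode.one).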
playbackEventStream.pairwise().listen((pair) {
final prev = pair.first;
final curr = pair.last;
// Detect auto-advance
if (_seeking) return;
if (prev.currentIndex == null || curr.currentIndex == null) return;
if (curr.currentIndex != prev.currentIndex) {
// If we've changed item without seeking, it must be an autoAdvance.
_positionDiscontinuitySubject.add(PositionDiscontinuity(
PositionDiscontinuityReason.autoAdvance, prev, curr));
} else {
// If the item is the same, try to determine whether we have looped
// back.
final prevPos = _getPositionFor(prev);
final currPos = _getPositionFor(curr);
if (loopMode != LoopMode.one) return;
if (currPos >= prevPos) return;
if (currPos >= const Duration(milliseconds: 300)) return;
final duration = this.duration;
if (duration != null && prevPos < duration * 0.6) return;
        // With no known duration, ignore small jumps back. Note the
        // jump magnitude is prevPos - currPos, since currPos < prevPos
        // at this point.
        if (duration == null &&
            prevPos - currPos < const Duration(seconds: 1)) {
          return;
        }
_positionDiscontinuitySubject.add(PositionDiscontinuity(
PositionDiscontinuityReason.autoAdvance, prev, curr));
}
}, onError: (Object e, StackTrace st) {});
_currentIndexSubject.addStream(playbackEventStream
.map((event) => event.currentIndex)
.distinct()
.handleError((Object err, StackTrace stackTrace) {/* noop */}));
_androidAudioSessionIdSubject.addStream(playbackEventStream
.map((event) => event.androidAudioSessionId)
.distinct()
.handleError((Object err, StackTrace stackTrace) {/* noop */}));
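    // Combine the latest sequence, shuffle order, current index and
    // playback modes into a single SequenceState.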
_sequenceStateSubject.addStream(Rx.combineLatest5<List<IndexedAudioSource>?,
List<int>?, int?, bool, LoopMode, SequenceState?>(
sequenceStream,
shuffleIndicesStream,
currentIndexStream,
shuffleModeEnabledStream,
loopModeStream,
(sequence, shuffleIndices, currentIndex, shuffleModeEnabled, loopMode) {
if (sequence == null) return null;
if (shuffleIndices == null) return null;
currentIndex ??= 0;
currentIndex = max(min(sequence.length - 1, max(0, currentIndex)), 0);
return SequenceState(
sequence,
currentIndex,
shuffleIndices,
shuffleModeEnabled,
loopMode,
);
},
).distinct().handleError((Object err, StackTrace stackTrace) {/* noop */}));
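    // The player state pairs the playing flag with the current
    // processing state.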
_playerStateSubject.addStream(
Rx.combineLatest2<bool, PlaybackEvent, PlayerState>(
playingStream,
playbackEventStream,
(playing, event) => PlayerState(playing, event.processingState))
.distinct()
.handleError((Object err, StackTrace stackTrace) {/* noop */}));
_shuffleModeEnabledSubject.add(false);
_loopModeSubject.add(LoopMode.off);
_setPlatformActive(false, force: true)
?.catchError((dynamic e) async => null);
_sequenceSubject.add(null);
// Respond to changes to AndroidAudioAttributes configuration.
if (androidApplyAudioAttributes && _isAndroid()) {
AudioSession.instance.then((audioSession) {
audioSession.configurationStream
.map((conf) => conf.androidAudioAttributes)
.where((attributes) => attributes != null)
.cast<AndroidAudioAttributes>()
.distinct()
.listen(setAndroidAudioAttributes);
});
}
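    // Pause or duck automatically in response to audio interruptions
    // and becoming-noisy events (e.g. headphones unplugged).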
if (handleInterruptions) {
AudioSession.instance.then((session) {
session.becomingNoisyEventStream.listen((_) {
pause();
});
session.interruptionEventStream.listen((event) {
if (event.begin) {
switch (event.type) {
case AudioInterruptionType.duck:
assert(_isAndroid());
if (session.androidAudioAttributes!.usage ==
AndroidAudioUsage.game) {
setVolume(volume / 2);
}
_playInterrupted = false;
break;
case AudioInterruptionType.pause:
case AudioInterruptionType.unknown:
if (playing) {
pause();
// Although pause is async and sets _playInterrupted = false,
// this is done in the sync portion.
_playInterrupted = true;
}
break;
}
} else {
switch (event.type) {
case AudioInterruptionType.duck:
assert(_isAndroid());
setVolume(min(1.0, volume * 2));
_playInterrupted = false;
break;
case AudioInterruptionType.pause:
if (_playInterrupted) play();
_playInterrupted = false;
break;
case AudioInterruptionType.unknown:
_playInterrupted = false;
break;
}
}
});
});
}
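    // Clean up the asset cache directory left behind by older plugin
    // versions.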
_removeOldAssetCacheDir();
}