fix(audio-messaging): android + fix dark theme label
parent aade47beb3
commit a69321f82e
2 changed files with 287 additions and 64 deletions
src/containers/ChatInput/index.js:

@@ -163,6 +163,15 @@ export default React.memo(function ChatInput({
      return;
    }
    requestingMicRef.current = true;
    const startTs = Date.now();
    const logStep = (step, extra) => {
      console.log("[ChatInput] startRecording step", {
        step,
        platform: Platform.OS,
        t: Date.now() - startTs,
        ...(extra ? extra : {}),
      });
    };
    try {
      console.log("[ChatInput] startRecording invoked", {
        platform: Platform.OS,
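The `requestingMicRef` flag above guards `startRecording` against re-entry while a permission prompt is pending, and `logStep` attaches elapsed-time telemetry to each stage. For reference, a minimal sketch of the same guard pattern in isolation; `useSingleFlight` is a hypothetical helper, not part of this commit:

```js
// Sketch only: a ref-based re-entrancy guard, the same idea as requestingMicRef.
import { useCallback, useRef } from "react";

export function useSingleFlight(task) {
  const inFlightRef = useRef(false);
  return useCallback(
    async (...args) => {
      if (inFlightRef.current) {
        return; // a previous call (e.g. a pending permission prompt) is still in flight
      }
      inFlightRef.current = true;
      try {
        return await task(...args);
      } finally {
        inFlightRef.current = false; // always release the guard
      }
    },
    [task],
  );
}
```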
@@ -176,6 +185,7 @@ export default React.memo(function ChatInput({
      return;
    }

    logStep("permission:begin");
    console.log("Requesting microphone permission..");
    if (Platform.OS === "android") {
      const { granted, status } = await ensureMicPermission();
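`ensureMicPermission` itself is not part of this diff; per the comments later in the commit it wraps react-native-permissions. A plausible sketch of such a helper, returning the `{ granted, status }` shape destructured above (an assumption, not the app's actual implementation):

```js
// Hypothetical helper matching the { granted, status } shape used above.
import { PERMISSIONS, RESULTS, check, request } from "react-native-permissions";

export async function ensureMicPermission() {
  const permission = PERMISSIONS.ANDROID.RECORD_AUDIO;
  let status = await check(permission);
  if (status !== RESULTS.GRANTED) {
    status = await request(permission); // shows the system dialog if needed
  }
  return { granted: status === RESULTS.GRANTED, status };
}
```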
@@ -199,6 +209,8 @@ export default React.memo(function ChatInput({
    } else {
      // iOS microphone permission is handled inside useVoiceRecorder via expo-audio
    }
    logStep("permission:end");

    // stop playback
    if (player !== null) {
      try {
@@ -211,7 +223,13 @@ export default React.memo(function ChatInput({
    console.log(
      "[ChatInput] startRecording delegating to useVoiceRecorder.start",
    );
    await startVoiceRecorder();
    logStep("useVoiceRecorder.start:begin");
    await startVoiceRecorder({
      // Android: permission is already handled via react-native-permissions in this component.
      // expo-audio's requestRecordingPermissionsAsync can hang on Android 16.
      skipPermissionRequest: Platform.OS === "android",
    });
    logStep("useVoiceRecorder.start:end");

    // Announce once when recording starts.
    if (lastRecordingAnnouncementRef.current !== "started") {
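The call above is the caller side of the new `start(options)` signature introduced in the hook below; a usage sketch with an illustrative try/catch wrapper (not part of the commit):

```js
// Sketch: invoking the updated useVoiceRecorder start() from a press handler.
const onPressRecord = async () => {
  try {
    await startVoiceRecorder({
      // Android permission is already granted via react-native-permissions,
      // so skip expo-audio's own prompt (it can hang on Android 16).
      skipPermissionRequest: Platform.OS === "android",
    });
  } catch (e) {
    console.log("[ChatInput] could not start recording", e);
  }
};
```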
@@ -658,5 +676,6 @@ const useStyles = createStyles(({ fontSize, wp, theme: { colors } }) => ({
  recordingExponentText: {
    height: 32,
    fontSize: 16,
    color: colors.onSurface,
  },
}));
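This is the dark-theme part of the commit: the recording label now takes its color from the theme's `onSurface` token so it stays legible in both light and dark mode. A minimal sketch of the pattern, reusing the `createStyles` signature from the hunk header (the `recordingLabel` name is illustrative only):

```js
// Sketch: theme-aware text color via the createStyles factory shown above.
const useStyles = createStyles(({ theme: { colors } }) => ({
  recordingLabel: {
    fontSize: 16,
    color: colors.onSurface, // tracks the active theme instead of a fixed color
  },
}));
```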
useVoiceRecorder hook:

@@ -1,4 +1,5 @@
import { useCallback, useEffect, useRef, useState } from "react";
import { AppState, Platform } from "react-native";
import {
  RecordingPresets,
  requestRecordingPermissionsAsync,
@@ -9,11 +10,81 @@ import {

let hasLoggedAudioMode = false;

const nowMs = () => Date.now();

const withTimeout = async (promise, timeoutMs, label) => {
  if (!timeoutMs || timeoutMs <= 0) {
    return promise;
  }
  let timeoutId;
  const timeoutPromise = new Promise((_, reject) => {
    timeoutId = setTimeout(() => {
      reject(
        new Error(
          `[useVoiceRecorder] timeout in ${label} after ${timeoutMs}ms`,
        ),
      );
    }, timeoutMs);
  });
  try {
    // race between actual promise and timeout
    return await Promise.race([promise, timeoutPromise]);
  } finally {
    if (timeoutId) {
      clearTimeout(timeoutId);
    }
  }
};

const waitForAppActive = async (timeoutMs) => {
  if (AppState.currentState === "active") {
    return;
  }

  let sub;
  let timeoutId;
  try {
    await new Promise((resolve, reject) => {
      if (timeoutMs && timeoutMs > 0) {
        timeoutId = setTimeout(() => {
          reject(
            new Error(
              `[useVoiceRecorder] timeout in waitForAppActive after ${timeoutMs}ms`,
            ),
          );
        }, timeoutMs);
      }

      sub = AppState.addEventListener("change", (state) => {
        if (state === "active") {
          resolve();
        }
      });
    });
  } finally {
    if (timeoutId) {
      clearTimeout(timeoutId);
    }
    try {
      sub?.remove?.();
    } catch (_e) {}
  }
};

const nextFrame = () =>
  new Promise((resolve) => {
    requestAnimationFrame(() => resolve());
  });
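One subtlety of `withTimeout`: `Promise.race` only stops waiting, it does not cancel the losing promise, so a timed-out native call can still settle later. That is why the hook also tracks an attempt id (see the next hunk). A small illustrative snippet, assuming an async context:

```js
// Sketch: the losing promise keeps running after withTimeout rejects.
const slow = new Promise((resolve) => setTimeout(() => resolve("late result"), 5000));

try {
  await withTimeout(slow, 1000, "demo"); // rejects after 1s
} catch (e) {
  console.log(e.message); // "[useVoiceRecorder] timeout in demo after 1000ms"
}

slow.then((value) => console.log(value)); // still logs "late result" at ~5s
```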
export default function useVoiceRecorder() {
  const recorderRef = useRef(null);
  const [isRecording, setIsRecording] = useState(false);
  const [uri, setUri] = useState(null);

  // Used to cancel stale/blocked starts so they cannot complete later
  // (e.g. after a background->foreground transition).
  const startAttemptRef = useRef(0);

  // NOTE: `expo-audio` doesn't export `AudioRecorder` as a runtime JS class.
  // The supported API is `useAudioRecorder`, which returns a native-backed SharedObject.
  const preset =
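`startAttemptRef` implements a cooperative-cancellation scheme: every `start()` call takes a new attempt id, and anything that bumps the counter (a newer start, or the app leaving the foreground) invalidates in-flight attempts at their next checkpoint. A framework-free sketch of the same idea (names are illustrative, not the hook's API):

```js
// Sketch only: the attempt-counter cancellation pattern behind startAttemptRef.
function makeCancellableStart(doStart) {
  let attempt = 0;

  const start = async () => {
    const id = ++attempt; // this call's ticket
    await doStart(); // may stall on a permission dialog, audio focus, etc.
    if (attempt !== id) {
      // a newer start or an explicit cancel happened while we were waiting
      throw Object.assign(new Error("start cancelled/superseded"), {
        __CANCELLED__: true,
      });
    }
  };

  const cancelPending = () => {
    attempt += 1; // e.g. called when the app goes to background
  };

  return { start, cancelPending };
}
```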
@@ -51,71 +122,194 @@ export default function useVoiceRecorder() {
    setIsRecording(false);
  }, []);

  const start = useCallback(async () => {
    // Reset any previous recording before starting a new one
    await cleanupRecording();
    setUri(null);

    const permission = await requestRecordingPermissionsAsync();
    if (!permission?.granted) {
      throw new Error("Microphone permission not granted");
    }

    // Configure audio mode for recording (iOS & Android)
    const recordingAudioMode = {
      allowsRecording: true,
      playsInSilentMode: true,
      interruptionMode: "doNotMix",
      interruptionModeAndroid: "doNotMix",
      shouldRouteThroughEarpiece: false,
      // Foreground-first: keep the audio session inactive in background.
      shouldPlayInBackground: false,
    };

    if (!hasLoggedAudioMode) {
      console.log("[useVoiceRecorder] audio mode set", recordingAudioMode);
      hasLoggedAudioMode = true;
    }

    await setAudioModeAsync(recordingAudioMode);

    const prepareAndStart = async () => {
      await setIsAudioActiveAsync(true).catch(() => {});
      console.log("[useVoiceRecorder] preparing recorder");
      await recorder.prepareToRecordAsync();
      console.log("[useVoiceRecorder] starting recorder");
      recorder.record();
      setIsRecording(true);
    };
    try {
      await prepareAndStart();
    } catch (error) {
      console.log("[useVoiceRecorder] recorder start failed", error);

      // One controlled retry for iOS: reset the audio session and try once more.
      try {
        await cleanupRecording();
        await setAudioModeAsync(recordingAudioMode);
        await new Promise((r) => setTimeout(r, 150));
        await prepareAndStart();
        return;
      } catch (_retryError) {
        console.log("[useVoiceRecorder] recorder retry failed", _retryError);
      }

      try {
        if (recorderRef.current?.isRecording) {
          await recorderRef.current.stop();
  const start = useCallback(
    async (options) => {
      const opts = options || {};
      const attemptId = ++startAttemptRef.current;
      const attemptStart = nowMs();
      const logStep = (step, extra) => {
        console.log("[useVoiceRecorder] start step", {
          step,
          platform: Platform.OS,
          attemptId,
          t: nowMs() - attemptStart,
          ...(extra ? extra : {}),
        });
      };
      const assertNotCancelled = () => {
        if (startAttemptRef.current !== attemptId) {
          const err = new Error(
            "[useVoiceRecorder] start cancelled/superseded",
          );
          err.__CANCELLED__ = true;
          throw err;
        }
      } catch (_e) {
        // ignore cleanup failures
      } finally {
        // keep recorder instance; hook will manage its lifecycle
        setIsRecording(false);
      };

      // Reset any previous recording before starting a new one
      await cleanupRecording();
      setUri(null);

      // If the app is not active, do not attempt a start (it may complete later unexpectedly).
      // This is especially important on Android where audio focus can be deferred.
      if (Platform.OS === "android") {
        logStep("waitForAppActive:begin", { appState: AppState.currentState });
        await waitForAppActive(2500).catch((_e) => {
          // If we cannot become active quickly, abort this start.
        });
        assertNotCancelled();
        logStep("waitForAppActive:end", { appState: AppState.currentState });

        if (AppState.currentState !== "active") {
          throw new Error("[useVoiceRecorder] start aborted: app not active");
        }

        // Yield one frame to ensure the permission dialog/gesture cycle has fully finished.
        await nextFrame();
        assertNotCancelled();
      }
      throw error;
    }
  }, [cleanupRecording, recorder]);

      // Permissions
      // - iOS: expo-audio permission API is the single source of truth.
      // - Android: the app already requests RECORD_AUDIO via react-native-permissions
      //   in [`startRecording()`](src/containers/ChatInput/index.js:161).
      //   On Android 16 we observed `requestRecordingPermissionsAsync()` can hang,
      //   so we allow skipping it.
      if (Platform.OS === "android" && opts.skipPermissionRequest === true) {
        logStep("permissions:skipped");
      } else {
        logStep("permissions:begin");
        const permission = await withTimeout(
          requestRecordingPermissionsAsync(),
          // iOS can sometimes take time if the system dialog appears; keep no timeout.
          Platform.OS === "android" ? 4000 : 0,
          "requestRecordingPermissionsAsync",
        );
        logStep("permissions:end", { granted: !!permission?.granted });
        if (!permission?.granted) {
          throw new Error("Microphone permission not granted");
        }
      }
      assertNotCancelled();

      // Configure audio mode for recording (iOS & Android)
      const recordingAudioMode = {
        allowsRecording: true,
        playsInSilentMode: true,
        interruptionMode: "doNotMix",
        interruptionModeAndroid: "doNotMix",
        shouldRouteThroughEarpiece: false,
        // Foreground-first: keep the audio session inactive in background.
        shouldPlayInBackground: false,
      };

      if (!hasLoggedAudioMode) {
        console.log("[useVoiceRecorder] audio mode set", recordingAudioMode);
        hasLoggedAudioMode = true;
      }

      logStep("setAudioModeAsync:begin");
      await withTimeout(
        setAudioModeAsync(recordingAudioMode),
        Platform.OS === "android" ? 4000 : 0,
        "setAudioModeAsync",
      );
      logStep("setAudioModeAsync:end");
      assertNotCancelled();

      const prepareAndStart = async () => {
        logStep("setIsAudioActiveAsync:begin");
        await withTimeout(
          setIsAudioActiveAsync(true).catch(() => {}),
          Platform.OS === "android" ? 4000 : 0,
          "setIsAudioActiveAsync",
        );
        logStep("setIsAudioActiveAsync:end");
        assertNotCancelled();

        console.log("[useVoiceRecorder] preparing recorder");
        logStep("prepareToRecordAsync:begin");
        await withTimeout(
          recorder.prepareToRecordAsync(),
          Platform.OS === "android" ? 7000 : 0,
          "prepareToRecordAsync",
        );
        logStep("prepareToRecordAsync:end");
        assertNotCancelled();

        console.log("[useVoiceRecorder] starting recorder");
        logStep("record:invoke");
        recorder.record();

        // Some Android versions may take a moment to flip the native state.
        // Avoid marking isRecording true until the recorder actually reports recording.
        if (Platform.OS === "android") {
          const startWait = nowMs();
          while (nowMs() - startWait < 800) {
            assertNotCancelled();
            if (recorder.isRecording) {
              break;
            }
            // eslint-disable-next-line no-await-in-loop
            await new Promise((r) => setTimeout(r, 50));
          }
        }

        assertNotCancelled();
        setIsRecording(true);
        logStep("record:started", { isRecording: !!recorder.isRecording });
      };
      try {
        await prepareAndStart();
      } catch (error) {
        console.log("[useVoiceRecorder] recorder start failed", error);

        // One controlled retry for iOS: reset the audio session and try once more.
        try {
          await cleanupRecording();
          await setAudioModeAsync(recordingAudioMode);
          await new Promise((r) => setTimeout(r, 150));
          await prepareAndStart();
          return;
        } catch (_retryError) {
          console.log("[useVoiceRecorder] recorder retry failed", _retryError);
        }

        // One controlled retry for Android if we hit a timeout/hang.
        // This prevents a later background->foreground from completing the old attempt.
        if (Platform.OS === "android") {
          try {
            startAttemptRef.current = attemptId; // keep attempt active for the retry
            await cleanupRecording();
            await new Promise((r) => setTimeout(r, 200));
            assertNotCancelled();
            logStep("androidRetry:begin");
            await prepareAndStart();
            logStep("androidRetry:success");
            return;
          } catch (_androidRetryError) {
            console.log(
              "[useVoiceRecorder] android retry failed",
              _androidRetryError,
            );
          }
        }

        try {
          if (recorderRef.current?.isRecording) {
            await recorderRef.current.stop();
          }
        } catch (_e) {
          // ignore cleanup failures
        } finally {
          // keep recorder instance; hook will manage its lifecycle
          setIsRecording(false);
        }
        throw error;
      }
    },
    [cleanupRecording, recorder],
  );

  const stop = useCallback(async () => {
    const recorder = recorderRef.current;
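The Android-only loop above polls `recorder.isRecording` for up to 800 ms before trusting the state, because the native flag can lag behind `record()`. The same idea as a reusable helper (an assumption for illustration, not part of the commit):

```js
// Hypothetical helper: poll a predicate until it is true or a deadline passes.
const waitUntil = async (predicate, { timeoutMs = 800, intervalMs = 50 } = {}) => {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    if (predicate()) {
      return true;
    }
    // eslint-disable-next-line no-await-in-loop
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
  return predicate();
};

// e.g. await waitUntil(() => recorder.isRecording);
```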
@@ -142,7 +336,17 @@ export default function useVoiceRecorder() {
  }, []);

  useEffect(() => {
    // Cancel any pending start when the app transitions away from active.
    // This prevents a stalled promise from completing later and starting recording unexpectedly.
    const sub = AppState.addEventListener("change", (state) => {
      if (state !== "active") {
        startAttemptRef.current += 1;
      }
    });
    return () => {
      try {
        sub.remove();
      } catch (_e) {}
      const recorder = recorderRef.current;
      if (recorder) {
        if (recorder.isRecording) {
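Because backgrounding the app bumps `startAttemptRef`, a stalled `start()` now rejects with the `__CANCELLED__` flag instead of silently starting later. A hedged sketch of how a caller might treat such rejections as non-errors (illustrative, not ChatInput's actual code):

```js
// Sketch: ignore cancelled/superseded starts, surface everything else.
try {
  await start({ skipPermissionRequest: Platform.OS === "android" });
} catch (e) {
  if (e?.__CANCELLED__) {
    // the attempt was invalidated (backgrounded or superseded); nothing to report
  } else {
    console.log("[useVoiceRecorder] start failed", e);
  }
}
```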