+
{top}
-
- {before && (
-
+
+ {hasBefore && (
+
{before}
)}
(
}}
/>
- {after && (
-
+ {(hasAfter || showResponsiveAfterInline) && (
+
+ {showResponsiveAfterInline && responsiveAfter}
{after}
)}
+ {showResponsiveAfterInFooter && (
+
+ {responsiveAfter}
+
+ )}
{bottom}
diff --git a/src/app/components/upload-card/UploadCardRenderer.tsx b/src/app/components/upload-card/UploadCardRenderer.tsx
index 931215b63..d9fa444f7 100644
--- a/src/app/components/upload-card/UploadCardRenderer.tsx
+++ b/src/app/components/upload-card/UploadCardRenderer.tsx
@@ -128,6 +128,10 @@ function PreviewAudio({ fileItem }: PreviewAudioProps) {
return undefined;
}
const audio = new Audio(audioUrl);
+ audio.preload = 'auto';
+ // Explicitly load so Firefox parses metadata immediately, making
+ // currentTime writable before the user has ever pressed play.
+ audio.load();
audioRef.current = audio;
audio.onended = () => {
@@ -177,13 +181,34 @@ function PreviewAudio({ fileItem }: PreviewAudioProps) {
}
};
+ const seekTo = (audio: HTMLAudioElement, targetTime: number) => {
+ // Alias to a local const to satisfy no-param-reassign.
+ const el = audio;
+ if (el.seekable.length > 0) {
+ el.currentTime = targetTime;
+ setCurrentTime(targetTime);
+ } else {
+ // Metadata not yet loaded (Firefox, first scrub before load() resolves).
+ // Do NOT call load() again here — that resets currentTime to 0 and
+ // restarts the fetch. load() was already called in the useEffect;
+ // just wait for the in-flight loadedmetadata event.
+ el.addEventListener(
+ 'loadedmetadata',
+ () => {
+ el.currentTime = targetTime;
+ setCurrentTime(targetTime);
+ },
+ { once: true }
+ );
+ }
+ };
+
const handleScrubClick = (e: React.MouseEvent
) => {
const audio = audioRef.current;
if (!audio || !duration) return;
const rect = e.currentTarget.getBoundingClientRect();
const ratio = Math.max(0, Math.min(1, (e.clientX - rect.left) / rect.width));
- audio.currentTime = ratio * duration;
- setCurrentTime(audio.currentTime);
+ seekTo(audio, ratio * duration);
};
const handleKeyDown = (e: React.KeyboardEvent) => {
@@ -209,8 +234,7 @@ function PreviewAudio({ fileItem }: PreviewAudioProps) {
return;
}
- audio.currentTime = newTime;
- setCurrentTime(newTime);
+ seekTo(audio, newTime);
};
return (
diff --git a/src/app/features/room/AudioMessageRecorder.css.ts b/src/app/features/room/AudioMessageRecorder.css.ts
index 47b165841..d53440044 100644
--- a/src/app/features/room/AudioMessageRecorder.css.ts
+++ b/src/app/features/room/AudioMessageRecorder.css.ts
@@ -22,7 +22,8 @@ const Shake = keyframes({
export const Container = style([
DefaultReset,
{
- flexGrow: 1,
+ width: '100%',
+ maxWidth: toRem(280),
minWidth: 0,
overflow: 'hidden',
touchAction: 'pan-y',
@@ -56,6 +57,7 @@ export const WaveformContainer = style([
height: 22,
overflow: 'hidden',
minWidth: 0,
+ flexGrow: 1,
},
]);
diff --git a/src/app/features/room/AudioMessageRecorder.tsx b/src/app/features/room/AudioMessageRecorder.tsx
index 5ec592bc8..136ccb6f8 100644
--- a/src/app/features/room/AudioMessageRecorder.tsx
+++ b/src/app/features/room/AudioMessageRecorder.tsx
@@ -7,6 +7,7 @@ import {
useRef,
useState,
} from 'react';
+import { useElementSizeObserver } from '$hooks/useElementSizeObserver';
import { useVoiceRecorder } from '$plugins/voice-recorder-kit';
import type { VoiceRecorderStopPayload } from '$plugins/voice-recorder-kit';
import { Box, Text } from 'folds';
@@ -37,14 +38,22 @@ function formatTime(seconds: number): string {
return `${m}:${s.toString().padStart(2, '0')}`;
}
+const MAX_BAR_COUNT = 28;
+const MIN_BAR_COUNT = 8;
+const BAR_WIDTH_PX = 2;
+const BAR_GAP_PX = 4;
+const RECORDER_CHROME_PX = 72;
+
export const AudioMessageRecorder = forwardRef<
AudioMessageRecorderHandle,
AudioMessageRecorderProps
>(({ onRecordingComplete, onRequestClose, onWaveformUpdate, onAudioLengthUpdate }, ref) => {
const isDismissedRef = useRef(false);
const userRequestedStopRef = useRef(false);
+ const containerRef = useRef(null);
const [isCanceling, setIsCanceling] = useState(false);
const [announcedTime, setAnnouncedTime] = useState(0);
+ const [barCount, setBarCount] = useState(MAX_BAR_COUNT);
const onRecordingCompleteRef = useRef(onRecordingComplete);
onRecordingCompleteRef.current = onRecordingComplete;
@@ -56,6 +65,8 @@ export const AudioMessageRecorder = forwardRef<
onAudioLengthUpdateRef.current = onAudioLengthUpdate;
const stableOnStop = useCallback((payload: VoiceRecorderStopPayload) => {
+ // useVoiceRecorder also stops during cancel/teardown paths, so only surface a completed
+ // recording after an explicit user stop.
if (!userRequestedStopRef.current) return;
if (isDismissedRef.current) return;
onRecordingCompleteRef.current({
@@ -102,14 +113,28 @@ export const AudioMessageRecorder = forwardRef<
}
}, [seconds, announcedTime]);
- const BAR_COUNT = 28;
+ useElementSizeObserver(
+ useCallback(() => containerRef.current, []),
+ useCallback((width) => {
+ const availableWaveformWidth = Math.max(0, width - RECORDER_CHROME_PX);
+ const nextBarCount = Math.max(
+ MIN_BAR_COUNT,
+ Math.min(
+ MAX_BAR_COUNT,
+ Math.floor((availableWaveformWidth + BAR_GAP_PX) / (BAR_WIDTH_PX + BAR_GAP_PX))
+ )
+ );
+ setBarCount((current) => (current === nextBarCount ? current : nextBarCount));
+ }, [])
+ );
+
const bars = useMemo(() => {
if (levels.length === 0) {
- return Array(BAR_COUNT).fill(0.15);
+ return Array(barCount).fill(0.15);
}
- if (levels.length <= BAR_COUNT) {
- const step = (levels.length - 1) / (BAR_COUNT - 1);
- return Array.from({ length: BAR_COUNT }, (_, i) => {
+ if (levels.length <= barCount) {
+ const step = (levels.length - 1) / (barCount - 1);
+ return Array.from({ length: barCount }, (_, i) => {
const position = i * step;
const lower = Math.floor(position);
const upper = Math.min(Math.ceil(position), levels.length - 1);
@@ -120,14 +145,14 @@ export const AudioMessageRecorder = forwardRef<
return (levels[lower] ?? 0.15) * (1 - fraction) + (levels[upper] ?? 0.15) * fraction;
});
}
- const step = levels.length / BAR_COUNT;
- return Array.from({ length: BAR_COUNT }, (_, i) => {
+ const step = levels.length / barCount;
+ return Array.from({ length: barCount }, (_, i) => {
const start = Math.floor(i * step);
const end = Math.floor((i + 1) * step);
const slice = levels.slice(start, end);
return slice.length > 0 ? Math.max(...slice) : 0.15;
});
- }, [levels]);
+ }, [barCount, levels]);
const containerClassName = [css.Container, isCanceling ? css.ContainerCanceling : null]
.filter(Boolean)
@@ -140,10 +165,15 @@ export const AudioMessageRecorder = forwardRef<
{error}
)}
-
+
-
+
{bars.map((level, i) => (
(
[setSelectedFiles, selectedFiles]
);
+ const handleAudioRecordingComplete = useCallback(
+ (payload: AudioRecordingCompletePayload) => {
+ const extension = getSupportedAudioExtension(payload.audioCodec);
+ const file = new File(
+ [payload.audioBlob],
+ `sable-audio-message-${Date.now()}.${extension}`,
+ {
+ type: payload.audioCodec,
+ }
+ );
+ handleFiles([file], {
+ waveform: payload.waveform,
+ audioDuration: payload.audioLength,
+ });
+ setShowAudioRecorder(false);
+ },
+ [handleFiles]
+ );
+
+ const audioRecorder = showAudioRecorder ? (
+
setShowAudioRecorder(false)}
+ onRecordingComplete={handleAudioRecordingComplete}
+ onAudioLengthUpdate={() => {}}
+ onWaveformUpdate={() => {}}
+ />
+ ) : undefined;
+
const handleCancelUpload = (uploads: Upload[]) => {
uploads.forEach((upload) => {
if (upload.status === UploadStatus.Loading) {
@@ -1136,10 +1169,12 @@ export const RoomInput = forwardRef(
editableName="RoomInput"
editor={editor}
key={inputKey}
- placeholder={showAudioRecorder && mobileOrTablet() ? '' : 'Send a message...'}
+ placeholder="Send a message..."
onKeyDown={handleKeyDown}
onKeyUp={handleKeyUp}
onPaste={handlePaste}
+ responsiveAfter={audioRecorder}
+ forceMultilineLayout={showAudioRecorder}
top={
<>
{scheduledTime && (
@@ -1241,45 +1276,19 @@ export const RoomInput = forwardRef(
>
}
before={
- !(showAudioRecorder && mobileOrTablet()) && (
- pickFile('*')}
- variant="SurfaceVariant"
- size="300"
- radii="300"
- title="Upload File"
- aria-label="Upload and attach a File"
- >
-
-
- )
+ pickFile('*')}
+ variant="SurfaceVariant"
+ size="300"
+ radii="300"
+ title="Upload File"
+ aria-label="Upload and attach a File"
+ >
+
+
}
after={
<>
- {showAudioRecorder && (
- setShowAudioRecorder(false)}
- onRecordingComplete={(payload) => {
- const extension = getSupportedAudioExtension(payload.audioCodec);
- const file = new File(
- [payload.audioBlob],
- `sable-audio-message-${Date.now()}.${extension}`,
- {
- type: payload.audioCodec,
- }
- );
- handleFiles([file], {
- waveform: payload.waveform,
- audioDuration: payload.audioLength,
- });
- setShowAudioRecorder(false);
- }}
- onAudioLengthUpdate={() => {}}
- onWaveformUpdate={() => {}}
- />
- )}
-
{/* ── Mic button — always present; icon swaps to Stop while recording ── */}
(
aria-label={showAudioRecorder ? 'Stop recording' : 'Record audio message'}
aria-pressed={showAudioRecorder}
onClick={() => {
- if (mobileOrTablet()) return;
+ if (mobileOrTablet() && !showAudioRecorder) return;
if (showAudioRecorder) {
audioRecorderRef.current?.stop();
} else {
diff --git a/src/app/features/room/RoomViewFollowing.css.ts b/src/app/features/room/RoomViewFollowing.css.ts
index 18b53ac92..3f7bee353 100644
--- a/src/app/features/room/RoomViewFollowing.css.ts
+++ b/src/app/features/room/RoomViewFollowing.css.ts
@@ -19,6 +19,7 @@ export const RoomViewFollowing = recipe({
backgroundColor: color.Surface.Container,
color: color.Surface.OnContainer,
outline: 'none',
+ userSelect: 'none',
},
],
variants: {
diff --git a/src/app/plugins/voice-recorder-kit/supportedCodec.ts b/src/app/plugins/voice-recorder-kit/supportedCodec.ts
index 445147cda..a1b094612 100644
--- a/src/app/plugins/voice-recorder-kit/supportedCodec.ts
+++ b/src/app/plugins/voice-recorder-kit/supportedCodec.ts
@@ -13,14 +13,17 @@ const safariPreferredCodecs = [
];
const defaultPreferredCodecs = [
- // Chromium / Firefox stable path.
- 'audio/webm;codecs=opus',
- 'audio/webm',
- // Firefox
+ // Firefox: ogg produces seekable blobs; webm passes isTypeSupported() but
+ // records without a cue index so currentTime assignment silently fails.
+ // Must come before webm so Firefox picks ogg.
'audio/ogg;codecs=opus',
- 'audio/ogg;codecs=vorbis',
'audio/ogg',
+ // Chromium: webm is seekable and preferred. Since Chromium doesn't support
+ // ogg recording, it will skip the above and land here.
+ 'audio/webm;codecs=opus',
+ 'audio/webm',
// Fallbacks
+ 'audio/ogg;codecs=vorbis',
'audio/wav;codecs=1',
'audio/wav',
'audio/mpeg',
diff --git a/src/app/plugins/voice-recorder-kit/useVoiceRecorder.test.tsx b/src/app/plugins/voice-recorder-kit/useVoiceRecorder.test.tsx
new file mode 100644
index 000000000..4bd620fad
--- /dev/null
+++ b/src/app/plugins/voice-recorder-kit/useVoiceRecorder.test.tsx
@@ -0,0 +1,186 @@
+import { act, renderHook, waitFor } from '@testing-library/react';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { useVoiceRecorder } from './useVoiceRecorder';
+
+type MockTrack = MediaStreamTrack & { stop: ReturnType<typeof vi.fn> };
+type MockStream = MediaStream & { getTracks: () => MockTrack[] };
+
+type MockNode = {
+ connect: ReturnType<typeof vi.fn>;
+ disconnect: ReturnType<typeof vi.fn>;
+};
+
+type MockAnalyserNode = MockNode & {
+ fftSize: number;
+ smoothingTimeConstant: number;
+ frequencyBinCount: number;
+ getByteFrequencyData: ReturnType<typeof vi.fn>;
+};
+
+type MockAudioContextInstance = {
+ state: AudioContextState;
+ destination: MockNode;
+ close: ReturnType<typeof vi.fn>;
+ resume: ReturnType<typeof vi.fn>;
+ suspend: ReturnType<typeof vi.fn>;
+ createMediaStreamSource: ReturnType<typeof vi.fn>;
+ createAnalyser: ReturnType<typeof vi.fn>;
+ createMediaStreamDestination: ReturnType<typeof vi.fn>;
+ createMediaElementSource: ReturnType<typeof vi.fn>;
+};
+
+const nativeAudioContext = globalThis.AudioContext;
+const nativeMediaRecorder = globalThis.MediaRecorder;
+const nativeRequestAnimationFrame = globalThis.requestAnimationFrame;
+const nativeCancelAnimationFrame = globalThis.cancelAnimationFrame;
+const nativeMediaDevices = navigator.mediaDevices;
+
+let inputTrack: MockTrack;
+let inputStream: MockStream;
+let destinationTrack: MockTrack;
+let createdAudioContexts: MockAudioContextInstance[];
+
+function createMockTrack(): MockTrack {
+ return {
+ stop: vi.fn(),
+ } as unknown as MockTrack;
+}
+
+function createMockNode(): MockNode {
+ return {
+ connect: vi.fn(),
+ disconnect: vi.fn(),
+ };
+}
+
+function createMockAnalyserNode(): MockAnalyserNode {
+ return {
+ ...createMockNode(),
+ fftSize: 0,
+ smoothingTimeConstant: 0,
+ frequencyBinCount: 16,
+ getByteFrequencyData: vi.fn((data: Uint8Array) => data.fill(0)),
+ };
+}
+
+class MockMediaRecorder {
+ public static isTypeSupported = vi.fn(() => true);
+
+ public state: RecordingState = 'inactive';
+
+ public ondataavailable: ((event: BlobEvent) => void) | null = null;
+
+ public onstop: (() => void) | null = null;
+
+ constructor(public readonly stream: MediaStream) {}
+
+ start() {
+ this.state = 'recording';
+ }
+
+ stop() {
+ if (this.state === 'inactive') return;
+ this.state = 'inactive';
+ this.onstop?.();
+ }
+
+ public requestData = vi.fn();
+
+ pause() {
+ this.state = 'paused';
+ }
+}
+
+function createMockAudioContext(): MockAudioContextInstance {
+ const context: MockAudioContextInstance = {
+ state: 'running',
+ destination: createMockNode(),
+ close: vi.fn(async () => {
+ context.state = 'closed';
+ }),
+ resume: vi.fn(async () => {
+ context.state = 'running';
+ }),
+ suspend: vi.fn(async () => {
+ context.state = 'suspended';
+ }),
+ createMediaStreamSource: vi.fn(() => createMockNode()),
+ createAnalyser: vi.fn(() => createMockAnalyserNode()),
+ createMediaStreamDestination: vi.fn(() => ({
+ ...createMockNode(),
+ stream: {
+ getTracks: () => [destinationTrack],
+ },
+ })),
+ createMediaElementSource: vi.fn(() => createMockNode()),
+ };
+ createdAudioContexts.push(context);
+ return context;
+}
+
+function MockAudioContext(): MockAudioContextInstance {
+ return createMockAudioContext();
+}
+
+beforeEach(() => {
+ inputTrack = createMockTrack();
+ destinationTrack = createMockTrack();
+ inputStream = {
+ getTracks: () => [inputTrack],
+ } as unknown as MockStream;
+ createdAudioContexts = [];
+
+ Object.defineProperty(navigator, 'mediaDevices', {
+ configurable: true,
+ value: {
+ getUserMedia: vi.fn(async () => inputStream),
+ },
+ });
+
+ globalThis.requestAnimationFrame = vi.fn(() => 1);
+ globalThis.cancelAnimationFrame = vi.fn();
+
+ globalThis.AudioContext = MockAudioContext as unknown as typeof AudioContext;
+
+ globalThis.MediaRecorder = MockMediaRecorder as unknown as typeof MediaRecorder;
+});
+
+afterEach(() => {
+ globalThis.AudioContext = nativeAudioContext;
+ globalThis.MediaRecorder = nativeMediaRecorder;
+ globalThis.requestAnimationFrame = nativeRequestAnimationFrame;
+ globalThis.cancelAnimationFrame = nativeCancelAnimationFrame;
+ Object.defineProperty(navigator, 'mediaDevices', {
+ configurable: true,
+ value: nativeMediaDevices,
+ });
+});
+
+describe('useVoiceRecorder', () => {
+ it('fully tears down the recording graph when recording stops', async () => {
+ const { result } = renderHook(() => useVoiceRecorder({ autoStart: false }));
+
+ act(() => {
+ result.current.start();
+ });
+
+ await waitFor(() => {
+ expect(result.current.isRecording).toBe(true);
+ });
+
+ const recordingContext = createdAudioContexts[0];
+ expect(recordingContext).toBeDefined();
+
+ act(() => {
+ result.current.handleStop();
+ });
+
+ await waitFor(() => {
+ expect(result.current.isRecording).toBe(false);
+ });
+
+ expect(inputTrack.stop).toHaveBeenCalledTimes(1);
+ expect(destinationTrack.stop).toHaveBeenCalledTimes(1);
+ expect(recordingContext?.close).toHaveBeenCalledTimes(1);
+ });
+});
diff --git a/src/app/plugins/voice-recorder-kit/useVoiceRecorder.ts b/src/app/plugins/voice-recorder-kit/useVoiceRecorder.ts
index f73f7daf0..e623c1b17 100644
--- a/src/app/plugins/voice-recorder-kit/useVoiceRecorder.ts
+++ b/src/app/plugins/voice-recorder-kit/useVoiceRecorder.ts
@@ -72,6 +72,10 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
const chunksRef = useRef([]);
const streamRef = useRef(null);
const audioContextRef = useRef(null);
+ const recordingSourceRef = useRef<MediaStreamAudioSourceNode | null>(null);
+ const recordingAnalyserRef = useRef<AnalyserNode | null>(null);
+ const recordingDestinationRef = useRef<MediaStreamAudioDestinationNode | null>(null);
+ const recordingStreamRef = useRef<MediaStream | null>(null);
const analyserRef = useRef(null);
const dataArrayRef = useRef(null);
const animationFrameIdRef = useRef(null);
@@ -107,20 +111,52 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
}
}, []);
+ const cleanupMediaRecorder = useCallback(() => {
+ const mediaRecorder = mediaRecorderRef.current;
+ mediaRecorderRef.current = null;
+ if (!mediaRecorder) return;
+ mediaRecorder.ondataavailable = null;
+ mediaRecorder.onstop = null;
+ }, []);
+
const cleanupAudioContext = useCallback(() => {
+ const audioContext = audioContextRef.current;
+ const recordingSource = recordingSourceRef.current;
+ const recordingAnalyser = recordingAnalyserRef.current;
+ const recordingDestination = recordingDestinationRef.current;
+ const recordingStream = recordingStreamRef.current;
+
if (animationFrameIdRef.current !== null) {
cancelAnimationFrame(animationFrameIdRef.current);
animationFrameIdRef.current = null;
}
frameCountRef.current = 0;
- if (audioContextRef.current) {
- if (audioContextRef.current.state !== 'closed') {
- audioContextRef.current.suspend().catch(() => {});
- }
- audioContextRef.current = null;
- }
+ audioContextRef.current = null;
+ recordingSourceRef.current = null;
+ recordingAnalyserRef.current = null;
+ recordingDestinationRef.current = null;
+ recordingStreamRef.current = null;
analyserRef.current = null;
dataArrayRef.current = null;
+
+ recordingStream?.getTracks().forEach((track) => track.stop());
+ recordingSource?.disconnect();
+ recordingAnalyser?.disconnect();
+ recordingDestination?.disconnect();
+
+ if (!audioContext) return;
+ if (recordingStream) {
+ // Recording contexts are disposable. Closing them fully releases the capture graph so
+ // mobile browsers do not keep the mic route or low-quality audio mode alive.
+ if (audioContext.state !== 'closed') {
+ audioContext.close().catch(() => {});
+ }
+ return;
+ }
+ // Playback reuses a shared context, so suspend it instead of tearing it down.
+ if (audioContext.state !== 'closed') {
+ audioContext.suspend().catch(() => {});
+ }
}, []);
const stopTimer = useCallback(() => {
@@ -219,7 +255,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
const setupAudioGraph = useCallback(
(stream: MediaStream): MediaStream => {
- const audioContext = getSharedAudioContext();
+ const audioContext = new AudioContext();
audioContextRef.current = audioContext;
const source = audioContext.createMediaStreamSource(stream);
const analyser = audioContext.createAnalyser();
@@ -227,12 +263,16 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
analyser.smoothingTimeConstant = 0.6;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
+ recordingSourceRef.current = source;
+ recordingAnalyserRef.current = analyser;
analyserRef.current = analyser;
dataArrayRef.current = dataArray;
// Fix for iOS Safari: routing the stream through a MediaStreamDestination
// prevents the AudioContext from "stealing" the track from the MediaRecorder
const destination = audioContext.createMediaStreamDestination();
+ recordingDestinationRef.current = destination;
+ recordingStreamRef.current = destination.stream;
source.connect(analyser);
analyser.connect(destination);
@@ -306,6 +346,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
mediaRecorder.onstop = () => {
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
setIsPaused(false);
@@ -380,11 +421,13 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
setError('Microphone access denied or an error occurred.');
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
}
}, [
cleanupAudioContext,
+ cleanupMediaRecorder,
cleanupStream,
emitStopPayload,
getAudioLength,
@@ -458,6 +501,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
}
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
setIsStopped(true);
@@ -470,6 +514,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
audioFile,
audioUrl,
cleanupAudioContext,
+ cleanupMediaRecorder,
cleanupStream,
emitStopPayload,
stopTimer,
@@ -511,6 +556,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
}
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
setIsStopped(true);
@@ -523,6 +569,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
audioFile,
audioUrl,
cleanupAudioContext,
+ cleanupMediaRecorder,
cleanupStream,
emitStopPayload,
stopTimer,
@@ -681,6 +728,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
mediaRecorder.onstop = () => {
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
setIsPaused(false);
@@ -744,6 +792,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
setError('Microphone access denied or an error occurred.');
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
isResumingRef.current = false;
@@ -751,6 +800,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
}, [
audioCodec,
cleanupAudioContext,
+ cleanupMediaRecorder,
cleanupStream,
emitStopPayload,
getAudioLength,
@@ -772,6 +822,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsPlaying(false);
setIsStopped(true);
@@ -795,7 +846,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
if (onDelete) {
onDelete();
}
- }, [cleanupAudioContext, cleanupStream, onDelete, stopTimer]);
+ }, [cleanupAudioContext, cleanupMediaRecorder, cleanupStream, onDelete, stopTimer]);
const handleRestart = useCallback(() => {
isRestartingRef.current = true;
@@ -812,6 +863,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
cleanupAudioContext();
cleanupStream();
+ cleanupMediaRecorder();
stopTimer();
setIsRecording(false);
setIsStopped(false);
@@ -842,7 +894,7 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
setAudioUrl(null);
setAudioFile(null);
internalStartRecording();
- }, [cleanupAudioContext, cleanupStream, internalStartRecording, stopTimer]);
+ }, [cleanupAudioContext, cleanupMediaRecorder, cleanupStream, internalStartRecording, stopTimer]);
useEffect(() => {
if (autoStart) {
@@ -852,6 +904,8 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
const mediaRecorder = mediaRecorderRef.current;
if (mediaRecorder && mediaRecorder.state !== 'inactive') {
mediaRecorder.stop();
+ } else {
+ cleanupMediaRecorder();
}
cleanupAudioContext();
cleanupStream();
@@ -869,7 +923,14 @@ export function useVoiceRecorder(options: UseVoiceRecorderOptions = {}): UseVoic
temporaryPreviewUrlRef.current = null;
}
};
- }, [autoStart, cleanupAudioContext, cleanupStream, internalStartRecording, stopTimer]);
+ }, [
+ autoStart,
+ cleanupAudioContext,
+ cleanupMediaRecorder,
+ cleanupStream,
+ internalStartRecording,
+ stopTimer,
+ ]);
const getState = (): RecorderState => {
if (isPlaying) return 'playing';
diff --git a/vite.config.ts b/vite.config.ts
index a66f458a8..8dd6723cb 100644
--- a/vite.config.ts
+++ b/vite.config.ts
@@ -115,7 +115,7 @@ function serverMatrixSdkCryptoWasm() {
};
}
-export default defineConfig({
+export default defineConfig(({ command }) => ({
appType: 'spa',
publicDir: false,
base: buildConfig.base,
@@ -142,6 +142,7 @@ export default defineConfig({
server: {
port: 8080,
host: true,
+ allowedHosts: command === 'serve' ? true : undefined,
fs: {
// Allow serving files from one level up to the project root
allow: ['..'],
@@ -243,4 +244,4 @@ export default defineConfig({
plugins: [inject({ Buffer: ['buffer', 'Buffer'] }) as PluginOption],
},
},
-});
+}));