Audio Producer Skill
I help you build audio players, process audio, and create interactive sound experiences for the web.
What I Do
Audio Playback:
- Custom audio players
- Playlist management
- Playback controls (play, pause, seek, volume)
- Waveform visualization
Audio Processing:
- Audio effects (reverb, delay, filters)
- Equalization and mixing
- Audio recording
- Real-time audio manipulation
Interactive Audio:
- Background music and sound effects
- User interaction sounds
- Spatial audio
- Audio notifications

Custom Audio Player

// components/AudioPlayer.tsx
'use client'
import { useState, useRef, useEffect } from 'react'
// Props for a single-track audio player.
interface AudioPlayerProps {
  src: string      // URL of the audio file
  title?: string   // optional track title shown above the controls
  artist?: string  // optional artist name shown below the title
}

/**
 * Custom audio player with play/pause, seek and volume controls.
 *
 * NOTE(review): reconstructed from a mangled extraction (generic type
 * arguments and parts of the JSX were stripped) — confirm against the
 * original component.
 */
export function AudioPlayer({ src, title, artist }: AudioPlayerProps) {
  const audioRef = useRef<HTMLAudioElement>(null)
  const [playing, setPlaying] = useState(false)
  const [currentTime, setCurrentTime] = useState(0)
  const [duration, setDuration] = useState(0)
  const [volume, setVolume] = useState(1)

  useEffect(() => {
    const audio = audioRef.current
    if (!audio) return

    const updateTime = () => setCurrentTime(audio.currentTime)
    const updateDuration = () => setDuration(audio.duration)
    // Named handler so the cleanup removes the SAME function reference.
    // (Passing a fresh arrow to removeEventListener never unsubscribes.)
    const handleEnded = () => setPlaying(false)

    audio.addEventListener('timeupdate', updateTime)
    audio.addEventListener('loadedmetadata', updateDuration)
    audio.addEventListener('ended', handleEnded)

    return () => {
      audio.removeEventListener('timeupdate', updateTime)
      audio.removeEventListener('loadedmetadata', updateDuration)
      audio.removeEventListener('ended', handleEnded)
    }
  }, [])

  const togglePlay = () => {
    const audio = audioRef.current
    if (!audio) return
    if (playing) {
      audio.pause()
    } else {
      // play() returns a promise; surface autoplay-policy rejections
      // instead of letting them become unhandled.
      audio.play().catch(err => console.error('Playback failed:', err))
    }
    setPlaying(prev => !prev)
  }

  const handleSeek = (e: React.ChangeEvent<HTMLInputElement>) => {
    const time = Number(e.target.value)
    if (audioRef.current) audioRef.current.currentTime = time
    setCurrentTime(time)
  }

  const handleVolumeChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    const v = Number(e.target.value)
    if (audioRef.current) audioRef.current.volume = v
    setVolume(v)
  }

  // mm:ss display; duration is NaN until 'loadedmetadata' fires.
  const formatTime = (seconds: number) => {
    if (!Number.isFinite(seconds)) return '0:00'
    const mins = Math.floor(seconds / 60)
    const secs = Math.floor(seconds % 60)
    return `${mins}:${secs.toString().padStart(2, '0')}`
  }

  return (
    <div className="bg-white rounded-lg shadow p-4">
      <audio ref={audioRef} src={src} preload="metadata" />

      {/* Track Info */}
      {(title || artist) && (
        <div className="mb-4">
          {title && <h3 className="font-semibold text-lg">{title}</h3>}
          {artist && <p className="text-gray-600 text-sm">{artist}</p>}
        </div>
      )}

      {/* Progress Bar */}
      <div className="mb-4">
        <input
          type="range"
          min="0"
          max={duration || 0}
          value={currentTime}
          onChange={handleSeek}
          className="w-full"
        />
        <div className="flex justify-between text-sm text-gray-600 mt-1">
          <span>{formatTime(currentTime)}</span>
          <span>{formatTime(duration)}</span>
        </div>
      </div>

      {/* Controls */}
      <div className="flex items-center gap-4">
        <button
          onClick={togglePlay}
          className="w-12 h-12 bg-blue-600 text-white rounded-full flex items-center justify-center hover:bg-blue-700"
        >
          {playing ? '⏸️' : '▶️'}
        </button>
        <div className="flex items-center gap-2 flex-1">
          <span className="text-sm">🔊</span>
          <input
            type="range"
            min="0"
            max="1"
            step="0.1"
            value={volume}
            onChange={handleVolumeChange}
            className="flex-1"
          />
        </div>
      </div>
    </div>
  )
}
Podcast Player

// components/PodcastPlayer.tsx
'use client'
import { useState } from 'react'
import { AudioPlayer } from './AudioPlayer'
// One podcast episode as listed in the episode picker.
interface Episode {
  id: string
  title: string
  description: string
  audioUrl: string
  duration: number      // seconds — presumably; TODO confirm against feed data
  publishedAt: string   // parseable date string (fed to `new Date(...)` below)
}

/**
 * Podcast player: an AudioPlayer for the selected episode plus a
 * clickable episode list. Defaults to the first episode; guards against
 * an empty episode array (the original dereferenced `currentEpisode.id`
 * unconditionally, which crashes when nothing is selected).
 */
export function PodcastPlayer({ episodes }: { episodes: Episode[] }) {
  const [currentEpisode, setCurrentEpisode] = useState<Episode | null>(
    episodes[0] ?? null
  )

  return (
    <div>
      {/* Player for the currently selected episode */}
      {currentEpisode && (
        <AudioPlayer
          src={currentEpisode.audioUrl}
          title={currentEpisode.title}
        />
      )}

      <div className="mt-6">
        <h3 className="font-semibold mb-4">Episodes</h3>
        <div className="space-y-2">
          {episodes.map((episode) => (
            <button
              key={episode.id}
              onClick={() => setCurrentEpisode(episode)}
              className={`w-full text-left p-4 rounded-lg ${
                currentEpisode?.id === episode.id
                  ? 'bg-blue-100 border-2 border-blue-600'
                  : 'bg-gray-100'
              }`}
            >
              <h4 className="font-medium">{episode.title}</h4>
              <p className="text-sm text-gray-600 mt-1">
                {episode.description}
              </p>
              <p className="text-xs text-gray-500 mt-2">
                {new Date(episode.publishedAt).toLocaleDateString()}
              </p>
            </button>
          ))}
        </div>
      </div>
    </div>
  )
}
Waveform Visualization

// components/Waveform.tsx
'use client'
import { useEffect, useRef } from 'react'
export function Waveform({ audioSrc }: { audioSrc: string }) {
const canvasRef = useRef
useEffect(() => { const canvas = canvasRef.current const audio = audioRef.current if (!canvas || !audio) return
const ctx = canvas.getContext('2d')
if (!ctx) return
const audioContext = new AudioContext()
const source = audioContext.createMediaElementSource(audio)
const analyser = audioContext.createAnalyser()
source.connect(analyser)
analyser.connect(audioContext.destination)
analyser.fftSize = 256
const bufferLength = analyser.frequencyBinCount
const dataArray = new Uint8Array(bufferLength)
const draw = () => {
requestAnimationFrame(draw)
analyser.getByteFrequencyData(dataArray)
ctx.fillStyle = 'rgb(0, 0, 0)'
ctx.fillRect(0, 0, canvas.width, canvas.height)
const barWidth = (canvas.width / bufferLength) * 2.5
let x = 0
for (let i = 0; i < bufferLength; i++) {
const barHeight = (dataArray[i] / 255) * canvas.height
ctx.fillStyle = `rgb(${barHeight + 100}, 50, 50)`
ctx.fillRect(x, canvas.height - barHeight, barWidth, barHeight)
x += barWidth + 1
}
}
draw()
}, [audioSrc])
return (
Audio Recording

// hooks/useAudioRecorder.ts
'use client'
import { useState, useRef } from 'react'
export function useAudioRecorder() {
const [recording, setRecording] = useState(false)
const [audioURL, setAudioURL] = useState
const startRecording = async () => { try { const stream = await navigator.mediaDevices.getUserMedia({ audio: true }) const mediaRecorder = new MediaRecorder(stream)
mediaRecorder.ondataavailable = e => {
chunksRef.current.push(e.data)
}
mediaRecorder.onstop = () => {
const blob = new Blob(chunksRef.current, { type: 'audio/webm' })
const url = URL.createObjectURL(blob)
setAudioURL(url)
chunksRef.current = []
}
mediaRecorder.start()
mediaRecorderRef.current = mediaRecorder
setRecording(true)
} catch (error) {
console.error('Failed to start recording:', error)
}
}
const stopRecording = () => { if (mediaRecorderRef.current && recording) { mediaRecorderRef.current.stop() mediaRecorderRef.current.stream.getTracks().forEach(track => track.stop()) setRecording(false) } }
return { recording, audioURL, startRecording, stopRecording } }
Usage:
'use client'
import { useAudioRecorder } from '@/hooks/useAudioRecorder'
export function VoiceRecorder() { const { recording, audioURL, startRecording, stopRecording } = useAudioRecorder()
return (
{audioURL && (
<div className="mt-4">
<audio src={audioURL} controls />
<a
href={audioURL}
download="recording.webm"
className="mt-2 inline-block px-4 py-2 bg-green-600 text-white rounded"
>
Download Recording
</a>
</div>
)}
</div>
) }
Audio Effects // lib/audio-effects.ts
export class AudioEffects { private audioContext: AudioContext private source: MediaElementAudioSourceNode private gainNode: GainNode private filterNode: BiquadFilterNode
constructor(audioElement: HTMLAudioElement) { this.audioContext = new AudioContext() this.source = this.audioContext.createMediaElementSource(audioElement) this.gainNode = this.audioContext.createGain() this.filterNode = this.audioContext.createBiquadFilter()
// Connect: source -> filter -> gain -> destination
this.source.connect(this.filterNode)
this.filterNode.connect(this.gainNode)
this.gainNode.connect(this.audioContext.destination)
}
setVolume(value: number) { this.gainNode.gain.value = value }
setLowPassFilter(frequency: number) { this.filterNode.type = 'lowpass' this.filterNode.frequency.value = frequency }
setHighPassFilter(frequency: number) { this.filterNode.type = 'highpass' this.filterNode.frequency.value = frequency }
setBandPassFilter(frequency: number) { this.filterNode.type = 'bandpass' this.filterNode.frequency.value = frequency } }
Spatial Audio

// components/SpatialAudio.tsx
'use client'
import { useEffect, useRef, useState } from 'react'
export function SpatialAudio({ audioSrc }: { audioSrc: string }) {
const audioRef = useRef
useEffect(() => { const audio = audioRef.current if (!audio) return
const audioContext = new AudioContext()
const source = audioContext.createMediaElementSource(audio)
const panner = audioContext.createPanner()
panner.panningModel = 'HRTF'
panner.distanceModel = 'inverse'
panner.refDistance = 1
panner.maxDistance = 10000
source.connect(panner)
panner.connect(audioContext.destination)
// Update panner position based on mouse/touch
panner.setPosition(position.x, position.y, 0)
}, [position])
return (
Move your mouse to change sound position
When to Use Me
Perfect for:
Building audio players Creating podcast platforms Adding sound effects Implementing voice recording Processing audio in real-time
I'll help you:
Build custom audio players Add waveform visualizations Implement audio recording Apply audio effects Create spatial audio experiences What I'll Create 🎵 Audio Players 🎙️ Voice Recorders 📊 Waveform Visualizations 🎛️ Audio Effects 🎧 Spatial Audio 🎼 Playlist Management
Let's create amazing audio experiences!
← Back to leaderboard