<input type="button" value="开始录音" onclick="startRecording()"/>
<input type="button" value="获取录音" onclick="obtainRecord()"/>
<input type="button" value="停止录音" onclick="stopRecord()"/>
<input type="button" value="播放录音" onclick="playRecord()"/>
<video id="video1" width="320px" height="240px" controls autoplay ></video>
<video id="video2" width="320px" height="240px" controls autoplay ></video>
<canvas id="canvas1" width="320" height="240"></canvas>
<input type="button" value="拍摄" onclick="scamera()"/>
<input type="button" value="播放视频" onclick="playVideo()"/>
JS file:
(function (window) {
// Compatibility shims for prefixed/legacy APIs
window.URL = window.URL || window.webkitURL
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia
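// Note: navigator.getUserMedia is the legacy callback-based API; modern browsers
// expose the promise-based navigator.mediaDevices.getUserMedia instead
// (see the feature-detection sketch at the end of this post).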
var HZRecorder = function (stream, config) {
config = config || {}
config.sampleBits = config.sampleBits || 8 // sample size in bits: 8 or 16
config.sampleRate = config.sampleRate || (44100 / 6) // sample rate (44100/6 = 7350 Hz)
// Create an audio context
var audioContext = window.AudioContext || window.webkitAudioContext
var context = new audioContext()
// Feed the microphone stream into the context
var audioInput = context.createMediaStreamSource(stream)
// Gain node for volume control
var volume = context.createGain()
audioInput.connect(volume)
// Size of the buffer used to cache captured samples
var bufferSize = 4096
// Create the processing node that receives the audio; the second and third
// arguments of createScriptProcessor are the input and output channel counts (stereo here).
var recorder = context.createScriptProcessor(bufferSize, 2, 2)
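// Note: ScriptProcessorNode is deprecated in the Web Audio spec in favour of
// AudioWorklet, but it still works in current browsers and keeps this example simple.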
var audioData = {
size: 0 // total length of the recording, in samples
, buffer: [] // cached sample buffers
, inputSampleRate: context.sampleRate // input sample rate
, inputSampleBits: 16 // input sample size: 8 or 16 bits
, outputSampleRate: config.sampleRate // output sample rate
, outputSampleBits: config.sampleBits // output sample size: 8 or 16 bits
, input: function (data) {
this.buffer.push(new Float32Array(data))
this.size += data.length
}
, compress: function () { // merge and downsample
// merge all cached buffers into one Float32Array
var data = new Float32Array(this.size)
var offset = 0
for (var i = 0; i < this.buffer.length; i++) {
data.set(this.buffer[i], offset)
offset += this.buffer[i].length
}
// downsample by decimation: keep every Nth sample
var compression = parseInt(this.inputSampleRate / this.outputSampleRate)
var length = data.length / compression
var result = new Float32Array(length)
var index = 0, j = 0
while (index < length) {
result[index] = data[j]
j += compression
index++
}
return result
}
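// Example: with a 44100 Hz input and the default output of 44100/6 = 7350 Hz,
// compression is 6, so every 6th sample is kept.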
, encodeWAV: function () {
var sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate)
var sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits)
var bytes = this.compress()
var dataLength = bytes.length * (sampleBits / 8)
var buffer = new ArrayBuffer(44 + dataLength)
var data = new DataView(buffer)
var channelCount = 1 // mono
var offset = 0
var writeString = function (str) {
for (var i = 0; i < str.length; i++) {
data.setUint8(offset + i, str.charCodeAt(i))
}
}
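// The 44-byte RIFF/WAVE header written below is: a 12-byte RIFF chunk
// descriptor, the 24-byte "fmt " sub-chunk, and the 8-byte "data" sub-chunk
// header; the raw PCM samples follow immediately after it.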
// ChunkID: "RIFF" resource exchange file identifier
writeString('RIFF'); offset += 4
// ChunkSize: bytes from the next field to the end of file, i.e. file size - 8
data.setUint32(offset, 36 + dataLength, true); offset += 4
// Format: "WAVE"
writeString('WAVE'); offset += 4
// Sub-chunk 1 ID: "fmt "
writeString('fmt '); offset += 4
// Sub-chunk 1 size: 16 (0x10) for PCM
data.setUint32(offset, 16, true); offset += 4
// Audio format: 1 = PCM (uncompressed)
data.setUint16(offset, 1, true); offset += 2
// Number of channels
data.setUint16(offset, channelCount, true); offset += 2
// Sample rate: samples per second per channel
data.setUint32(offset, sampleRate, true); offset += 4
// Byte rate: channels * sampleRate * bitsPerSample / 8
data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4
// Block align: bytes per sample frame = channels * bitsPerSample / 8
data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2
// Bits per sample
data.setUint16(offset, sampleBits, true); offset += 2
// Sub-chunk 2 ID: "data"
writeString('data'); offset += 4
// Sub-chunk 2 size: total size of the sample data, i.e. file size - 44
data.setUint32(offset, dataLength, true); offset += 4
// Write the PCM samples
if (sampleBits === 8) {
for (var i = 0; i < bytes.length; i++, offset++) {
var s = Math.max(-1, Math.min(1, bytes[i]))
var val = s < 0 ? s * 0x8000 : s * 0x7FFF
val = parseInt(255 / (65535 / (val + 32768)))
data.setInt8(offset, val)
}
} else {
for (var i = 0; i < bytes.length; i++, offset += 2) {
var s = Math.max(-1, Math.min(1, bytes[i]))
data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
}
}
return new Blob([data], { type: 'audio/wav' })
}
}
// Start recording
this.start = function () {
audioInput.connect(recorder)
recorder.connect(context.destination)
}
// Stop recording
this.stop = function () {
recorder.disconnect()
}
// Get the recorded audio as a WAV Blob
this.getBlob = function () {
this.stop()
return audioData.encodeWAV()
}
// Play back through the given <audio>/<video> element
this.play = function (audio) {
audio.src = window.URL.createObjectURL(this.getBlob())
}
// Upload the recording to a server
this.upload = function (url, callback) {
var fd = new FormData()
fd.append('audioData', this.getBlob())
var xhr = new XMLHttpRequest()
if (callback) {
xhr.upload.addEventListener('progress', function (e) {
callback('uploading', e)
}, false)
xhr.addEventListener('load', function (e) {
callback('ok', e)
}, false)
xhr.addEventListener('error', function (e) {
callback('error', e)
}, false)
xhr.addEventListener('abort', function (e) {
callback('cancel', e)
}, false)
}
xhr.open('POST', url)
xhr.send(fd)
}
// Audio capture callback: fires once per bufferSize frames of new input
recorder.onaudioprocess = function (e) {
audioData.input(e.inputBuffer.getChannelData(0))
}
}
// Throw an error whose toString() yields a readable message
HZRecorder.throwError = function (message) {
throw new function () { this.toString = function () { return message } }
}
// Whether the (legacy) recording API is available
HZRecorder.canRecording = (navigator.getUserMedia != null)
// Obtain a recorder instance (asynchronously, via callback)
HZRecorder.get = function (callback, config) {
if (callback) {
if (navigator.getUserMedia) {
navigator.getUserMedia(
{ audio: true } // audio only
, function (stream) {
var rec = new HZRecorder(stream, config)
callback(rec)
}
, function (error) {
switch (error.code || error.name) {
case 'PERMISSION_DENIED':
case 'PermissionDeniedError':
HZRecorder.throwError('The user denied permission to record.')
break
// The original post is truncated at this point; the cases and closing code
// below follow the commonly circulated HZRecorder source.
case 'NOT_SUPPORTED_ERROR':
case 'NotSupportedError':
HZRecorder.throwError('The browser does not support the recording device.')
break
case 'MANDATORY_UNSATISFIED_ERROR':
case 'MandatoryUnsatisfiedError':
HZRecorder.throwError('No recording device was found.')
break
default:
HZRecorder.throwError('Unable to open the microphone: ' + (error.code || error.name))
break
}
})
} else {
HZRecorder.throwError('The current browser does not support recording.')
}
}
}
window.HZRecorder = HZRecorder
})(window)
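The HTML above references handlers (startRecording(), stopRecord(), playRecord(), obtainRecord()) that this post never defines. A minimal sketch of the page glue might look like the following; the recorder variable, the reuse of the video2 element for playback, and the /upload endpoint are assumptions for illustration, not part of the original code:

var recorder // will hold the HZRecorder instance

function startRecording() {
  // Ask for the microphone, then start capturing
  HZRecorder.get(function (rec) {
    recorder = rec
    recorder.start()
  })
}

function stopRecord() {
  if (recorder) recorder.stop()
}

function playRecord() {
  // Play the WAV back through the second media element on the page
  if (recorder) recorder.play(document.getElementById('video2'))
}

function obtainRecord() {
  // Upload the WAV blob; '/upload' is a placeholder endpoint
  if (recorder) recorder.upload('/upload', function (state, e) {
    console.log(state, e)
  })
}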
Safari does not support HTML5 recording. The best in-browser solution today is the navigator.getUserMedia API from WebRTC, but a look at Can I use shows that its support is far from universal.
A practical approach is to branch asynchronously on the front end: when the browser supports the relevant API (getUserMedia), switch to the HTML5 mode and load the Canvas, Web Worker and so on into the container div; otherwise, load a Flash control as the fallback.
In fact, the Audio and Video tags have been supported since Safari 3.1; earlier Safari versions essentially relied on QuickTime. Also, whether the HTML5 APIs are available depends on the browser implementation, not really on the operating system.
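A minimal sketch of that detection branch, assuming a hypothetical loadFlashRecorder() fallback and a recorder-box container element:

// Detect recording support (modern promise-based API first, then legacy prefixes)
var hasGetUserMedia = !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia) ||
  !!(navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia)

var box = document.getElementById('recorder-box')
if (hasGetUserMedia) {
  // HTML5 mode: mount the Canvas/Web Worker based recorder UI here
  box.innerHTML = '<canvas id="wave" width="320" height="240"></canvas>'
} else {
  // Fallback: embed the Flash recording control instead
  loadFlashRecorder(box)
}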