
记录两种播放pcm音频方法
单例 .h文件
#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

/// Singleton audio player offering two playback paths:
/// 1) streaming raw PCM chunks through an AudioQueue (-playWithData:),
/// 2) converting a PCM file to WAV and playing it with AVAudioPlayer.
@interface WYAudioPlayer : NSObject

/// Shared singleton accessor; the instance can be torn down again with -clear.
+ (instancetype)sharedInstance;

/// Plays an audio file (e.g. the generated WAV) with AVAudioPlayer.
/// @param fileURL File URL of the audio file (use +[NSURL fileURLWithPath:]).
- (void)playMusicWithPath:(NSURL *)fileURL;

/// Stops AVAudioPlayer playback and releases the player.
- (void)stopPlay;

/// Enqueues one chunk of raw PCM (mono, 16-bit, 16 kHz) streamed from the backend.
- (void)playWithData:(NSData *)data;

/// Stops the AudioQueue and resets the singleton so it can be re-created.
- (void)clear;

/// Writes a WAV header plus the PCM payload referenced by `filePath`
/// (a URL string) to Documents/speaker.wav and returns that path.
- (NSString *)getWavFileFromPcmData:(NSString *)filePath;

@end

NS_ASSUME_NONNULL_END
方法一:直接播放pcm文件(.m 实现,需要 #import <AudioToolbox/AudioToolbox.h> 与 #import <AVFoundation/AVFoundation.h>)
#define MIN_SIZE_PER_FRAME 5000 //bytes per AudioQueue buffer; the indoor unit expects 960 — must match the format configured below
#define QUEUE_BUFFER_SIZE 3 //number of reusable AudioQueue buffers
#define SAMPLE_RATE 16000 //PCM sample rate in Hz
// Private state for the AudioQueue-based PCM streaming path.
@interface WYAudioPlayer()
{
AudioQueueRef audioQueue; //playback queue that consumes the enqueued PCM buffers
AudioStreamBasicDescription _audioDescription; //PCM stream format (mono, 16-bit, 16 kHz)
AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE]; //preallocated reusable audio buffers
BOOL audioQueueBufferUsed[QUEUE_BUFFER_SIZE]; //YES while the matching buffer is enqueued; cleared by the playback callback
NSLock *sysnLock; //serializes -playWithData: (buffers are shared with the callback thread)
NSMutableData *tempData; //copy of the most recently submitted PCM chunk
OSStatus osState; //result code of the last AudioQueue call
}
@end
@implementation WYAudioPlayer
static WYAudioPlayer *_shareInstance;
static dispatch_once_t onceToken;
/// Thread-safe singleton accessor; recreated after -clear resets `onceToken`.
+ (instancetype)sharedInstance {
    dispatch_once(&onceToken, ^{ _shareInstance = [[self alloc] init]; });
    return _shareInstance;
}
/// Designated initializer: configures the PCM format and starts the AudioQueue.
- (instancetype)init {
    if ((self = [super init])) {
        [self initAudioDescriptionSetting];
    }
    return self;
}
/// Configures the linear-PCM stream description (mono / 16-bit / 16 kHz),
/// creates the playback AudioQueue with its reusable buffers, and starts it.
/// Called once from -init.
/// Fix over the original: the AudioQueueNewOutput result is checked before the
/// queue is used (the original allocated buffers and started a queue that might
/// never have been created).
- (void)initAudioDescriptionSetting
{
    sysnLock = [[NSLock alloc] init];

    // Linear PCM, signed 16-bit integer samples, packed, mono @ 16 kHz.
    // These values must match what the backend actually streams.
    _audioDescription.mSampleRate = SAMPLE_RATE;
    _audioDescription.mFormatID = kAudioFormatLinearPCM;
    _audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    _audioDescription.mChannelsPerFrame = 1;   // 1 = mono, 2 = stereo
    _audioDescription.mFramesPerPacket = 1;    // uncompressed PCM: one frame per packet
    _audioDescription.mBitsPerChannel = 16;    // 16-bit quantization per sample
    _audioDescription.mBytesPerFrame = (_audioDescription.mBitsPerChannel / 8) * _audioDescription.mChannelsPerFrame;
    _audioDescription.mBytesPerPacket = _audioDescription.mBytesPerFrame * _audioDescription.mFramesPerPacket;

    // Create the output queue; the callback runs on the queue's internal thread.
    osState = AudioQueueNewOutput(&_audioDescription, AudioPlayerAQInputCallback, (__bridge void * _Nullable)(self), NULL, NULL, 0, &audioQueue);
    if (osState != noErr || audioQueue == NULL) {
        NSLog(@"AudioQueueNewOutput failed: %d", (int)osState);
        return; // no queue — nothing further to configure
    }

    AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, 1.0);

    // Preallocate the reusable buffers and mark them all as free.
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        audioQueueBufferUsed[i] = false;
        osState = AudioQueueAllocateBuffer(audioQueue, MIN_SIZE_PER_FRAME, &audioQueueBuffers[i]);
        if (osState != noErr) {
            NSLog(@"AudioQueueAllocateBuffer %d failed: %d", i + 1, (int)osState);
        }
    }

    osState = AudioQueueStart(audioQueue, NULL);
    if (osState != noErr) {
        NSLog(@"AudioQueueStart Error: %d", (int)osState);
    }
}
/// Flushes any buffered audio and resets the queue's playback state.
- (void)resetPlay
{
    if (audioQueue) {
        AudioQueueReset(audioQueue);
    }
}
// 播放相关
/// Enqueues one chunk of raw PCM on the next free AudioQueue buffer.
/// Fixes over the original:
///  - the copy is clamped to the buffer's allocated capacity (the original
///    memcpy'd `len` bytes into a buffer allocated with only
///    MIN_SIZE_PER_FRAME bytes — a heap overflow for larger chunks);
///  - empty input / missing queue returns early instead of enqueuing garbage;
///  - the intermediate malloc + getBytes round-trip is gone.
/// @param data Raw PCM in the format configured in -initAudioDescriptionSetting.
- (void)playWithData:(NSData *)data
{
    if (data.length == 0 || audioQueue == NULL) {
        return; // nothing to play, or the queue never came up
    }
    [sysnLock lock];
    tempData = [NSMutableData dataWithData:data];
    NSUInteger len = tempData.length;

    // Busy-wait for a free buffer; the playback callback releases them.
    int i = 0;
    while (true) {
        if (!audioQueueBufferUsed[i]) {
            audioQueueBufferUsed[i] = true;
            break;
        }
        i++;
        if (i >= QUEUE_BUFFER_SIZE) {
            i = 0;
        }
    }

    // Clamp to the buffer's capacity so we never write past mAudioData.
    AudioQueueBufferRef buffer = audioQueueBuffers[i];
    if (len > buffer->mAudioDataBytesCapacity) {
        len = buffer->mAudioDataBytesCapacity;
    }
    buffer->mAudioDataByteSize = (UInt32)len;
    memcpy(buffer->mAudioData, tempData.bytes, len); // copy the PCM payload directly

    AudioQueueEnqueueBuffer(audioQueue, buffer, 0, NULL);
    printf("本次播放数据大小: %lu", (unsigned long)len);
    [sysnLock unlock];
}
// ************************** 回调 **********************************
// 回调回来把buffer状态设为未使用
/// AudioQueue output callback: the queue has finished playing
/// `audioQueueBufferRef`, so hand the buffer back to the free pool.
static void AudioPlayerAQInputCallback(void *inUserData, AudioQueueRef audioQueueRef, AudioQueueBufferRef audioQueueBufferRef)
{
    WYAudioPlayer *player = (__bridge WYAudioPlayer *)inUserData;
    [player resetBufferState:audioQueueRef and:audioQueueBufferRef];
}
/// Marks the buffer the queue just finished with as available again.
- (void)resetBufferState:(AudioQueueRef)audioQueueRef and:(AudioQueueBufferRef)audioQueueBufferRef {
    for (int idx = 0; idx < QUEUE_BUFFER_SIZE; idx++) {
        if (audioQueueBuffers[idx] == audioQueueBufferRef) {
            audioQueueBufferUsed[idx] = false; // free this slot for -playWithData:
        }
    }
}
// ************************** 内存回收 **********************************
/// Final teardown.
/// Fixes over the original:
///  - AudioQueueDispose (not just Stop) so the queue and its buffers are
///    actually released;
///  - the AVAudioPlayer delegate is cleared BEFORE the player reference is
///    nil'ed (the original nil'ed first, making the delegate line a no-op
///    message to nil).
- (void)dealloc
{
    if (audioQueue != NULL) {
        AudioQueueStop(audioQueue, true);
        AudioQueueDispose(audioQueue, true); // frees the queue and its buffers
    }
    audioQueue = NULL;
    sysnLock = nil;
    avAudioPlayer.delegate = nil;
    avAudioPlayer = nil;
}
/********* 单例清除 ***********/
/// Tears the singleton down so the next +sharedInstance call rebuilds it.
/// Fix over the original: AudioQueueDispose is called so the queue's buffers
/// are released rather than leaked on every clear/recreate cycle.
- (void)clear
{
    // Route play-and-record audio to the speaker.
    AVAudioSession *session = [AVAudioSession sharedInstance];
    [session setCategory:AVAudioSessionCategoryPlayAndRecord
             withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker
                   error:nil];
    if (audioQueue != NULL) {
        AudioQueueStop(audioQueue, true);
        AudioQueueDispose(audioQueue, true); // release the queue and its buffers
    }
    audioQueue = NULL;
    sysnLock = nil;
    // Reset the dispatch_once token so +sharedInstance creates a fresh
    // instance next time; it defaults to 0, and only a 0 token re-runs.
    onceToken = 0;
    _shareInstance = nil;
}
@end
播放pcm音频直接调用:
/*
urlStr:http://xxx.xxx.xxx.xxx:xxxx/509.pcm
pcm文件地址
*/
NSData *audioData = [NSData dataWithContentsOfURL:[NSURL URLWithString:urlStr]];
NSMutableData *mData = [[NSMutableData alloc] initWithData:audioData];
NSInteger tem = 5000; //每个分片的字节数,与 MIN_SIZE_PER_FRAME 对应
NSInteger count = mData.length/tem+1;
for (int i = 0; i < count; i++) {
    NSUInteger offset = (NSUInteger)(i * tem);
    NSUInteger chunkLen = MIN((NSUInteger)tem, mData.length - offset);
    if (chunkLen == 0) { break; }
    [[WYAudioPlayer sharedInstance] playWithData:[mData subdataWithRange:NSMakeRange(offset, chunkLen)]];
}
方法二:将PCM数据文件转换为WAV文件播放wav音频
将PCM数据文件转换为WAV文件其实就是在PCM数据前加上WAV的头。只需要设置好码率,声道数,采样位数就可以。简单来说:pcm是无损wav文件中音频数据的一种编码方式,pcm和wav的区别,简单概括成pcm少了一个wav头描述信息。
将pcm转成wav:
导入头文件
#import <AVFoundation/AVFoundation.h>
// Private state for the WAV / AVAudioPlayer playback path.
@interface WYAudioPlayer()
{
AVAudioPlayer *avAudioPlayer; //plays the WAV file generated from the PCM payload
}
@end
@implementation WYAudioPlayer
static WYAudioPlayer *_shareInstance;
static dispatch_once_t onceToken;
/// Thread-safe singleton accessor (token is reset by -clear).
+ (instancetype)sharedInstance {
    dispatch_once(&onceToken, ^{ _shareInstance = [[self alloc] init]; });
    return _shareInstance;
}
/// Downloads the raw PCM payload referenced by `filePath` (a URL string),
/// prepends a standard 44-byte WAV header, writes the result to
/// Documents/speaker.wav and returns that path.
///
/// Fixes over the original:
///  - the "data" chunk size is the payload's byte count (the original
///    multiplied the byte count by bytes-per-sample again, doubling it);
///  - the RIFF chunk size is 36 + dataSize per the WAV spec (the original
///    wrote 46 + dataSize);
///  - the payload is downloaded once instead of twice;
///  - the file is opened in binary mode via the path's file-system
///    representation, and a failed fopen no longer falls through to fwrite.
///
/// @param filePath URL string pointing at the PCM resource.
/// @return Path of the generated WAV file.
- (NSString *)getWavFileFromPcmData:(NSString *)filePath
{
    NSString *wavFilePath = [self wavFilePath]; // destination WAV path
    NSLog(@"PCM file path : %@", filePath);

    // NOTE(review): synchronous download — assumes the caller is off the main thread.
    NSData *pcmData = [NSData dataWithContentsOfURL:[NSURL URLWithString:filePath]];

    // WAV header fields — must match the PCM recording format.
    short numChannels = 1;      // mono
    short bitsPerSample = 16;   // 16-bit linear PCM
    int samplingRate = 16000;   // Hz
    int byteRate = numChannels * bitsPerSample * samplingRate / 8;
    short blockAlign = numChannels * bitsPerSample / 8;
    int dataSize = (int)pcmData.length;  // payload is already raw PCM bytes
    int fmtChunkSize = 16;               // "fmt " chunk payload size for PCM
    int riffChunkSize = 36 + dataSize;   // file size minus the 8-byte RIFF preamble
    short audioFormat = 1;               // 1 = uncompressed PCM

    FILE *fout = fopen([wavFilePath fileSystemRepresentation], "wb");
    if (fout == NULL) {
        printf("Error opening out file ");
        return wavFilePath;
    }
    fwrite("RIFF", sizeof(char), 4, fout);
    fwrite(&riffChunkSize, sizeof(int), 1, fout);
    fwrite("WAVE", sizeof(char), 4, fout);
    fwrite("fmt ", sizeof(char), 4, fout);
    fwrite(&fmtChunkSize, sizeof(int), 1, fout);
    fwrite(&audioFormat, sizeof(short), 1, fout);
    fwrite(&numChannels, sizeof(short), 1, fout);
    fwrite(&samplingRate, sizeof(int), 1, fout);
    fwrite(&byteRate, sizeof(int), 1, fout);
    fwrite(&blockAlign, sizeof(short), 1, fout);
    fwrite(&bitsPerSample, sizeof(short), 1, fout);
    fwrite("data", sizeof(char), 4, fout);
    fwrite(&dataSize, sizeof(int), 1, fout);
    fclose(fout);

    // Append the PCM payload after the 44-byte header.
    NSFileHandle *handle = [NSFileHandle fileHandleForUpdatingAtPath:wavFilePath];
    [handle seekToEndOfFile];
    if (pcmData != nil) {
        [handle writeData:pcmData];
    }
    [handle closeFile];
    return wavFilePath;
}
/// Path of the generated WAV file inside the app's Documents directory.
- (NSString *)wavFilePath
{
    NSArray<NSString *> *docDirs = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    return [docDirs.lastObject stringByAppendingPathComponent:@"speaker.wav"];
}
//音乐播放
/// Plays an audio file with AVAudioPlayer.
/// Fix over the original: the creation error is surfaced instead of being
/// swallowed with `error:nil`, and a failed player is not configured further.
/// @param fileURL File URL of the audio file (use +[NSURL fileURLWithPath:]).
- (void)playMusicWithPath:(NSURL *)fileURL
{
    [self stopPlay]; // tear down any previous player first

    NSError *error = nil;
    avAudioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:fileURL error:&error];
    if (avAudioPlayer == nil) {
        NSLog(@"AVAudioPlayer init failed: %@", error);
        return;
    }
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback error:nil];

    avAudioPlayer.numberOfLoops = 0; // play once, no looping
    avAudioPlayer.volume = 1;
    avAudioPlayer.delegate = self;
    [avAudioPlayer play];
}
// 结束播放 (stop playback)
/// Stops and releases the AVAudioPlayer.
/// Fix over the original: the delegate is cleared BEFORE the player reference
/// is nil'ed — the original set `avAudioPlayer = nil` first, so its following
/// `avAudioPlayer.delegate = nil` was a no-op message to nil and the delegate
/// was never actually detached.
- (void)stopPlay
{
    [avAudioPlayer stop];
    avAudioPlayer.delegate = nil;
    avAudioPlayer = nil;
}
@end
播放wav音频直接调用:
/*
urlStr:http://xxx.xxx.xxx.xxx:xxxx/509.pcm
pcm文件地址
*/
NSString *wavData = [[WYAudioPlayer sharedInstance] getWavFileFromPcmData:urlStr];
[[WYAudioPlayer sharedInstance] playMusicWithPath:[NSURL fileURLWithPath:wavData]]; // 注意:返回的是文件路径,必须用 fileURLWithPath:(URLWithString: 生成的无 scheme URL 无法被 AVAudioPlayer 打开)
欢迎分享,转载请注明来源:内存溢出
微信扫一扫
支付宝扫一扫
评论列表(0条)