
Implementing voice intercom in Qt using WebSocket

Introduction:

    The functionality and code described in this article are based on a Qt development environment. WebSocket is used in Qt to receive and send PCM audio, implementing a voice intercom (two-way talk) feature. The feature has been self-tested and works correctly; the relevant code is shared below.
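
Project configuration note: the code below relies on the Qt WebSockets and Qt Multimedia modules (the Qt 5 audio API: QAudioInput/QAudioOutput). A minimal qmake fragment is sketched here as an assumption; merge it into whatever your project's .pro file already contains.

QT += core gui network websockets multimedia
greaterThan(QT_MAJOR_VERSION, 4): QT += widgets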

The main window has a line edit for the WebSocket server URL and two buttons that open and close the talk session; the button slots simply delegate to the ctWsTalk singleton:

void MainWindow::on_pushButton_OpenTalk_clicked()
{
    QString sUrl = ui->lineEdit_ws->text();
    ctWsTalk::getInstance().startWsTalk(sUrl);
}
void MainWindow::on_pushButton_CloseTalk_clicked()
{
    ctWsTalk::getInstance().stopWsTalk();
}

ctWsTalk.h

#ifndef CTWSTALK_H
#define CTWSTALK_H

#include <QAudio>
#include <QFile>
#include <QElapsedTimer>
#include <list>
#include <QWebSocket>
#include <QObject>

class ctWsTalk : public QObject
{
    Q_OBJECT
public:
    ctWsTalk();
    ~ctWsTalk();

    static ctWsTalk& getInstance();

    void startWsTalk(QString url);
    void stopWsTalk();

public slots:
    void onConnected();
    void onDisconnected();
    void onBinaryMessageReceived(const QByteArray& data);

private slots:
    void handleStateChanged_input(QAudio::State newState);
    void handleAudioNotify();
    void handleAudioData();

private:
    void wsClientStart();
    void wsClientClose();
    void wsSendBinary(const QByteArray& binaryData);
    void initMicrophoneInput();
    void initSpeakerOutput();
    void startIntercom();
    void stopIntercom();

private:
    bool m_bWsConnect = false;      // true once the websocket reports connected()
    int m_nAudioSize = 0;           // number of audio frames received so far
    QFile m_fileIn;                 // dump of received PCM (AudioIn.pcm)
    QFile m_fileOut;                // dump of captured microphone PCM (AudioOut.pcm)
    QElapsedTimer m_timer;          // used to estimate the average receive interval
    QString m_sUrl;                 // websocket server URL

    class QAudioInput* m_pAudioInput = nullptr;     // microphone capture
    class QAudioOutput* m_pAudioOutput = nullptr;   // speaker playback
    class QIODevice* m_pInputDevice = nullptr;      // device returned by QAudioInput::start()
    QIODevice* m_pOutputDevice = nullptr;           // device returned by QAudioOutput::start()
    QWebSocket m_websocket;
    std::list<QByteArray> m_audioinDataList;        // received PCM frames queued for playback

};

#endif // CTWSTALK_H

ctWsTalk.cpp

#include "ctwstalk.h"
#include <QAudioFormat>
#include <QAudioDeviceInfo>
#include <QAudioInput>
#include <QAudioOutput>
#include <QBuffer>
#include <QDebug>
#include <QDateTime>

#define WRITE_INTO_PCM 1   // when defined, dump the sent/received PCM streams to local files for debugging

ctWsTalk::ctWsTalk()
{
    m_timer.start();
}

ctWsTalk::~ctWsTalk()
{
   stopWsTalk();
}

ctWsTalk &ctWsTalk::getInstance()
{
    static ctWsTalk s_obj;
    return s_obj;
}

void ctWsTalk::startWsTalk(QString url)
{
    qDebug() << "ctWsTalk::startWsTalk url:" << url;
    m_sUrl = url;

#ifdef WRITE_INTO_PCM
    // AudioOut.pcm holds the microphone audio sent to the peer,
    // AudioIn.pcm holds the audio received from the peer
    if (!m_fileOut.isOpen())
    {
        m_fileOut.setFileName("AudioOut.pcm");
        m_fileOut.open(QIODevice::WriteOnly);
        m_timer.restart();
    }
    if (!m_fileIn.isOpen())
    {
        m_fileIn.setFileName("AudioIn.pcm");
        m_fileIn.open(QIODevice::WriteOnly);
    }
#endif

    wsClientStart();
    startIntercom();
}

void ctWsTalk::stopWsTalk()
{
#ifdef WRITE_INTO_PCM
    m_fileOut.close();
    m_fileIn.close();
#endif
    stopIntercom();
    wsClientClose();
}

void ctWsTalk::handleStateChanged_input(QAudio::State newState)
{
    switch (newState)
    {
        case QAudio::StoppedState:
            if (m_pAudioInput->error() != QAudio::NoError)
            {
                qDebug() << "AudioInput Error.";
            }
            break;
        default:
            break;
    }
}

void ctWsTalk::handleAudioNotify()
{
    // Driven by QAudioInput::notify(): play back one queued frame of received audio
    if (!m_audioinDataList.empty() && m_pOutputDevice)
    {
        auto data = m_audioinDataList.front();
        m_pOutputDevice->write(data);
        m_audioinDataList.pop_front();
    }
    if (m_nAudioSize)
        qDebug() << "Audio Recv, list size=" << m_audioinDataList.size() << ", recv audio interval(ms)=" << m_timer.elapsed() / m_nAudioSize;
}

void ctWsTalk::handleAudioData()
{
    // Read the captured audio data and send it over the websocket
    QByteArray audioData = m_pInputDevice->readAll();
    wsSendBinary(audioData);

    qDebug() << "Audio Send, audioData.size():" << audioData.size();

#ifdef WRITE_INTO_PCM
    m_fileOut.write(audioData);
#endif
}

void ctWsTalk::onConnected()
{
    qDebug() << "hello world! Connected.";
    m_bWsConnect = true;
}

void ctWsTalk::onDisconnected()
{
    qDebug() << "Disconnected.";
    m_bWsConnect = false;
}

void ctWsTalk::onBinaryMessageReceived(const QByteArray &binaryData)
{
#ifdef WRITE_INTO_PCM
    m_fileIn.write(binaryData);
#endif
    if (!m_pOutputDevice)
        return;
    // Queue the received PCM; it is played back in handleAudioNotify()
    m_audioinDataList.push_back(binaryData);
    m_nAudioSize++;
}

void ctWsTalk::wsClientStart()
{
    QNetworkProxy proxy;
    proxy.setType(QNetworkProxy::NoProxy);
    m_websocket.setParent(this);
    m_websocket.setProxy(proxy);

    // Qt::UniqueConnection prevents duplicate connections when the talk session is started repeatedly
    connect(&m_websocket, SIGNAL(connected()), this, SLOT(onConnected()), Qt::UniqueConnection);
    connect(&m_websocket, SIGNAL(disconnected()), this, SLOT(onDisconnected()), Qt::UniqueConnection);
    connect(&m_websocket, SIGNAL(binaryMessageReceived(const QByteArray&)), this, SLOT(onBinaryMessageReceived(const QByteArray&)), static_cast<Qt::ConnectionType>(Qt::QueuedConnection | Qt::UniqueConnection));

    m_websocket.open(QUrl(m_sUrl));
    // open() is asynchronous, so only errors detected so far (e.g. an invalid URL) are reported here
    if (m_websocket.error() != QAbstractSocket::UnknownSocketError)
    {
        qWarning() << m_websocket.error() << m_websocket.errorString();
    }
}

void ctWsTalk::wsClientClose()
{
    m_websocket.close();
}

void ctWsTalk::wsSendBinary(const QByteArray &binaryData)
{
    if (!m_bWsConnect)
        return;
    m_websocket.sendBinaryMessage(binaryData);
}

// Initialise the microphone input
void ctWsTalk::initMicrophoneInput()
{
    // Configure the audio input format
    QAudioFormat format;
    format.setSampleRate(8000);     // sample rate in Hz
    format.setChannelCount(1);      // number of channels (mono)
    format.setSampleSize(16);       // sample size in bits
    format.setCodec("audio/pcm");   // codec
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    // Use the default capture device as the input source
    QAudioDeviceInfo info = QAudioDeviceInfo::defaultInputDevice();
    qDebug() << "The name of the inputDeviceInfo: " << info.deviceName();

    // If the requested format is not supported, fall back to the nearest format the device supports
    if (!info.isFormatSupported(format))
    {
        format = info.nearestFormat(format);
    }
    // List the codecs supported by the current device
    qDebug() << "Codecs supported by the current device:";
    QStringList list = info.supportedCodecs();
    for (int i = 0; i < list.size(); i++)
    {
        auto text = list.at(i) + " ";
        qDebug() << text;
    }
    qDebug() << "input sample rate =" << format.sampleRate();
    qDebug() << "input channel count =" << format.channelCount();
    qDebug() << "input sample size =" << format.sampleSize();
    qDebug() << "input codec =" << format.codec();

    // Create the audio input object
    if (!m_pAudioInput)
        m_pAudioInput = new QAudioInput(info, format);
    if (m_pAudioInput)
    {
        connect(m_pAudioInput, &QAudioInput::stateChanged, this, &ctWsTalk::handleStateChanged_input);
        // notify() fires roughly every 17 ms and is used to pace playback of the received audio
        m_pAudioInput->setNotifyInterval(17);
        connect(m_pAudioInput, &QAudioInput::notify, this, &ctWsTalk::handleAudioNotify);
        m_pInputDevice = m_pAudioInput->start();
        connect(m_pInputDevice, &QIODevice::readyRead, this, &ctWsTalk::handleAudioData);
    }
    m_nAudioSize = 0;
}

// Initialise the speaker output
void ctWsTalk::initSpeakerOutput()
{
    // Get the default audio output device
    QAudioDeviceInfo outputDeviceInfo = QAudioDeviceInfo::defaultOutputDevice();
    qDebug() << "The name of output device: " << outputDeviceInfo.deviceName();

    if (m_pAudioInput)
    {
        QAudioFormat format = m_pAudioInput->format();
        // Create the audio output object, using the same format as the input
        m_pAudioOutput = new QAudioOutput(outputDeviceInfo, format);
        if (m_pAudioOutput)
            m_pOutputDevice = m_pAudioOutput->start();
    }
}

void ctWsTalk::startIntercom()
{
    initMicrophoneInput();
    initSpeakerOutput();
}

void ctWsTalk::stopIntercom()
{
    if (m_pAudioInput) {
        m_pAudioInput->stop();
        m_pAudioInput->deleteLater();
        m_pAudioInput = nullptr;
    }
    if (m_pAudioOutput) {
        m_pAudioOutput->stop();
        m_pAudioOutput->deleteLater();
        m_pAudioOutput = nullptr;
    }
    // The QIODevice handles are owned by the audio objects; close and drop our references
    if (m_pInputDevice) {
        m_pInputDevice->close();
        m_pInputDevice = nullptr;
    }
    m_pOutputDevice = nullptr;
    m_audioinDataList.clear();
    m_nAudioSize = 0;
}
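
For local self-testing, a minimal loopback server is enough: it echoes every binary frame straight back to the sender, so pointing the client at ws://127.0.0.1:9000 lets you hear your own microphone with a short delay. The sketch below is an assumption added for illustration and is not part of the original code; the server name and port are arbitrary.

// pcm_echo_server.cpp - hypothetical loopback test server (not from the original post)
#include <QCoreApplication>
#include <QWebSocketServer>
#include <QWebSocket>
#include <QHostAddress>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    // Listen on an arbitrary port; the client connects to ws://127.0.0.1:9000
    QWebSocketServer server(QStringLiteral("pcm-echo"), QWebSocketServer::NonSecureMode);
    if (!server.listen(QHostAddress::Any, 9000))
        return 1;

    QObject::connect(&server, &QWebSocketServer::newConnection, [&server]() {
        QWebSocket* client = server.nextPendingConnection();
        // Echo each received PCM frame straight back to the same client
        QObject::connect(client, &QWebSocket::binaryMessageReceived, client,
                         [client](const QByteArray& pcm) { client->sendBinaryMessage(pcm); });
        QObject::connect(client, &QWebSocket::disconnected, client, &QWebSocket::deleteLater);
    });

    return app.exec();
}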

Original article: https://blog.csdn.net/linyibin_123/article/details/142755632
