Changeset 846 for trunk/demos/spectrum/app/engine.cpp
Timestamp: May 5, 2011, 5:36:53 AM (14 years ago)
Location: trunk
Files: 2 edited
trunk
    Property svn:mergeinfo changed
        /branches/vendor/nokia/qt/4.7.2 (added): merged 845
        /branches/vendor/nokia/qt/current: merged 844
        /branches/vendor/nokia/qt/4.6.3: removed
trunk/demos/spectrum/app/engine.cpp
--- trunk/demos/spectrum/app/engine.cpp (r769)
+++ trunk/demos/spectrum/app/engine.cpp (r846)
@@ -1,5 +1,5 @@
 /****************************************************************************
 **
-** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
 ** All rights reserved.
 ** Contact: Nokia Corporation (qt-info@nokia.com)
@@ -86,4 +86,5 @@
     , m_generateTone(false)
     , m_file(0)
+    , m_analysisFile(0)
     , m_availableAudioInputDevices
           (QAudioDeviceInfo::availableDevices(QAudio::AudioInput))
@@ -97,8 +98,11 @@
     , m_audioOutput(0)
     , m_playPosition(0)
+    , m_bufferPosition(0)
+    , m_bufferLength(0)
     , m_dataLength(0)
+    , m_levelBufferLength(0)
     , m_rmsLevel(0.0)
     , m_peakLevel(0.0)
-    , m_spectrumLengthBytes(0)
+    , m_spectrumBufferLength(0)
     , m_spectrumAnalyser()
     , m_spectrumPosition(0)
@@ -106,4 +110,5 @@
 {
     qRegisterMetaType<FrequencySpectrum>("FrequencySpectrum");
+    qRegisterMetaType<WindowFunction>("WindowFunction");
     CHECKED_CONNECT(&m_spectrumAnalyser,
                     SIGNAL(spectrumChanged(FrequencySpectrum)),
@@ -133,27 +138,24 @@
 bool Engine::loadFile(const QString &fileName)
 {
+    reset();
     bool result = false;
-    m_generateTone = false;
-
+    Q_ASSERT(!m_generateTone);
+    Q_ASSERT(!m_file);
     Q_ASSERT(!fileName.isEmpty());
-    Q_ASSERT(!m_file);
-    m_file = new QFile(fileName, this);
-    m_file->setFileName(fileName);
-    Q_ASSERT(m_file->exists());
-    if (m_file->open(QFile::ReadOnly)) {
-        m_wavFile.readHeader(*m_file);
-        if (isPCMS16LE(m_wavFile.format())) {
+    m_file = new WavFile(this);
+    if (m_file->open(fileName)) {
+        if (isPCMS16LE(m_file->fileFormat())) {
             result = initialize();
         } else {
            emit errorMessage(tr("Audio format not supported"),
-                              formatToString(m_wavFile.format()));
+                              formatToString(m_file->fileFormat()));
         }
     } else {
         emit errorMessage(tr("Could not open file"), fileName);
     }
-
-    delete m_file;
-    m_file = 0;
-
+    if (result) {
+        m_analysisFile = new WavFile(this);
+        m_analysisFile->open(fileName);
+    }
     return result;
 }
@@ -161,4 +163,6 @@
 bool Engine::generateTone(const Tone &tone)
 {
+    reset();
+    Q_ASSERT(!m_generateTone);
     Q_ASSERT(!m_file);
     m_generateTone = true;
@@ -173,4 +177,5 @@
 bool Engine::generateSweptTone(qreal amplitude)
 {
+    Q_ASSERT(!m_generateTone);
     Q_ASSERT(!m_file);
     m_generateTone = true;
@@ -186,5 +191,7 @@
 bool Engine::initializeRecord()
 {
+    reset();
     ENGINE_DEBUG << "Engine::initializeRecord";
+    Q_ASSERT(!m_generateTone);
     Q_ASSERT(!m_file);
     m_generateTone = false;
@@ -193,32 +200,7 @@
 }
 
-qint64 Engine::bufferDuration() const
-{
-    return BufferDurationUs;
-}
-
-qint64 Engine::dataDuration() const
-{
-    qint64 result = 0;
-    if (QAudioFormat() != m_format)
-        result = audioDuration(m_format, m_dataLength);
-    return result;
-}
-
-qint64 Engine::audioBufferLength() const
-{
-    qint64 length = 0;
-    if (QAudio::ActiveState == m_state || QAudio::IdleState == m_state) {
-        Q_ASSERT(QAudioFormat() != m_format);
-        switch (m_mode) {
-        case QAudio::AudioInput:
-            length = m_audioInput->bufferSize();
-            break;
-        case QAudio::AudioOutput:
-            length = m_audioOutput->bufferSize();
-            break;
-        }
-    }
-    return length;
+qint64 Engine::bufferLength() const
+{
+    return m_file ? m_file->size() : m_bufferLength;
 }
 
@@ -253,5 +235,5 @@
             m_count = 0;
             m_dataLength = 0;
-            emit dataDurationChanged(0);
+            emit dataLengthChanged(0);
             m_audioInputIODevice = m_audioInput->start();
             CHECKED_CONNECT(m_audioInputIODevice, SIGNAL(readyRead()),
@@ -276,5 +258,4 @@
             m_spectrumAnalyser.cancelCalculation();
             spectrumChanged(0, 0, FrequencySpectrum());
-
             setPlayPosition(0, true);
             stopRecording();
@@ -285,8 +266,15 @@
                             this, SLOT(audioNotify()));
             m_count = 0;
-            m_audioOutputIODevice.close();
-            m_audioOutputIODevice.setBuffer(&m_buffer);
-            m_audioOutputIODevice.open(QIODevice::ReadOnly);
-            m_audioOutput->start(&m_audioOutputIODevice);
+            if (m_file) {
+                m_file->seek(0);
+                m_bufferPosition = 0;
+                m_dataLength = 0;
+                m_audioOutput->start(m_file);
+            } else {
+                m_audioOutputIODevice.close();
+                m_audioOutputIODevice.setBuffer(&m_buffer);
+                m_audioOutputIODevice.open(QIODevice::ReadOnly);
+                m_audioOutput->start(&m_audioOutputIODevice);
+            }
         }
     }
@@ -333,38 +321,53 @@
     switch (m_mode) {
     case QAudio::AudioInput: {
-            const qint64 recordPosition =
-                qMin(BufferDurationUs, m_audioInput->processedUSecs());
+            const qint64 recordPosition = qMin(m_bufferLength, audioLength(m_format, m_audioInput->processedUSecs()));
             setRecordPosition(recordPosition);
-
-            // Calculate level of most recently captured data
-            qint64 levelLength = audioLength(m_format, LevelWindowUs);
-            levelLength = qMin(m_dataLength, levelLength);
-            const qint64 levelPosition = m_dataLength - levelLength;
-            calculateLevel(levelPosition, levelLength);
-
-            // Calculate spectrum of most recently captured data
-            if (m_dataLength >= m_spectrumLengthBytes) {
-                const qint64 spectrumPosition = m_dataLength - m_spectrumLengthBytes;
+            const qint64 levelPosition = m_dataLength - m_levelBufferLength;
+            if (levelPosition >= 0)
+                calculateLevel(levelPosition, m_levelBufferLength);
+            if (m_dataLength >= m_spectrumBufferLength) {
+                const qint64 spectrumPosition = m_dataLength - m_spectrumBufferLength;
                 calculateSpectrum(spectrumPosition);
             }
+            emit bufferChanged(0, m_dataLength, m_buffer);
         }
         break;
     case QAudio::AudioOutput: {
-            const qint64 playPosition =
-                qMin(dataDuration(), m_audioOutput->processedUSecs());
-            setPlayPosition(playPosition);
-
-            qint64 analysisPosition = audioLength(m_format, playPosition);
-
-            // Calculate level of data starting at current playback position
-            const qint64 levelLength = audioLength(m_format, LevelWindowUs);
-            if (analysisPosition + levelLength < m_dataLength)
-                calculateLevel(analysisPosition, levelLength);
-
-            if (analysisPosition + m_spectrumLengthBytes < m_dataLength)
-                calculateSpectrum(analysisPosition);
-
-            if (dataDuration() == playPosition)
-                stopPlayback();
+            const qint64 playPosition = audioLength(m_format, m_audioOutput->processedUSecs());
+            setPlayPosition(qMin(bufferLength(), playPosition));
+            const qint64 levelPosition = playPosition - m_levelBufferLength;
+            const qint64 spectrumPosition = playPosition - m_spectrumBufferLength;
+            if (m_file) {
+                if (levelPosition > m_bufferPosition ||
+                    spectrumPosition > m_bufferPosition ||
+                    qMax(m_levelBufferLength, m_spectrumBufferLength) > m_dataLength) {
+                    m_bufferPosition = 0;
+                    m_dataLength = 0;
+                    // Data needs to be read into m_buffer in order to be analysed
+                    const qint64 readPos = qMax(qint64(0), qMin(levelPosition, spectrumPosition));
+                    const qint64 readEnd = qMin(m_analysisFile->size(), qMax(levelPosition + m_levelBufferLength, spectrumPosition + m_spectrumBufferLength));
+                    const qint64 readLen = readEnd - readPos + audioLength(m_format, WaveformWindowDuration);
+                    qDebug() << "Engine::audioNotify [1]"
+                             << "analysisFileSize" << m_analysisFile->size()
+                             << "readPos" << readPos
+                             << "readLen" << readLen;
+                    if (m_analysisFile->seek(readPos + m_analysisFile->headerLength())) {
+                        m_buffer.resize(readLen);
+                        m_bufferPosition = readPos;
+                        m_dataLength = m_analysisFile->read(m_buffer.data(), readLen);
+                        qDebug() << "Engine::audioNotify [2]" << "bufferPosition" << m_bufferPosition << "dataLength" << m_dataLength;
+                    } else {
+                        qDebug() << "Engine::audioNotify [2]" << "file seek error";
+                    }
+                    emit bufferChanged(m_bufferPosition, m_dataLength, m_buffer);
+                }
+            } else {
+                if (playPosition >= m_dataLength)
+                    stopPlayback();
+            }
+            if (levelPosition >= 0 && levelPosition + m_levelBufferLength < m_bufferPosition + m_dataLength)
+                calculateLevel(levelPosition, m_levelBufferLength);
+            if (spectrumPosition >= 0 && spectrumPosition + m_spectrumBufferLength < m_bufferPosition + m_dataLength)
+                calculateSpectrum(spectrumPosition);
         }
         break;
@@ -377,25 +380,30 @@
                  << "to" << state;
 
-    if (QAudio::StoppedState == state) {
-        // Check error
-        QAudio::Error error = QAudio::NoError;
-        switch (m_mode) {
-        case QAudio::AudioInput:
-            error = m_audioInput->error();
-            break;
-        case QAudio::AudioOutput:
-            error = m_audioOutput->error();
-            break;
-        }
-        if (QAudio::NoError != error) {
-            reset();
-            return;
-        }
-    }
-    setState(state);
+    if (QAudio::IdleState == state && m_file && m_file->pos() == m_file->size()) {
+        stopPlayback();
+    } else {
+        if (QAudio::StoppedState == state) {
+            // Check error
+            QAudio::Error error = QAudio::NoError;
+            switch (m_mode) {
+            case QAudio::AudioInput:
+                error = m_audioInput->error();
+                break;
+            case QAudio::AudioOutput:
+                error = m_audioOutput->error();
+                break;
+            }
+            if (QAudio::NoError != error) {
+                reset();
+                return;
+            }
+        }
+        setState(state);
+    }
 }
 
 void Engine::audioDataReady()
 {
+    Q_ASSERT(0 == m_bufferPosition);
     const qint64 bytesReady = m_audioInput->bytesReady();
     const qint64 bytesSpace = m_buffer.size() - m_dataLength;
@@ -408,7 +416,5 @@
     if (bytesRead) {
         m_dataLength += bytesRead;
-
-        const qint64 duration = audioDuration(m_format, m_dataLength);
-        emit dataDurationChanged(duration);
+        emit dataLengthChanged(dataLength());
     }
 
@@ -420,7 +426,5 @@
 {
     ENGINE_DEBUG << "Engine::spectrumChanged" << "pos" << m_spectrumPosition;
-    const qint64 positionUs = audioDuration(m_format, m_spectrumPosition);
-    const qint64 lengthUs = audioDuration(m_format, m_spectrumLengthBytes);
-    emit spectrumChanged(positionUs, lengthUs, spectrum);
+    emit spectrumChanged(m_spectrumPosition, m_spectrumBufferLength, spectrum);
 }
 
@@ -430,10 +434,6 @@
 //-----------------------------------------------------------------------------
 
-void Engine::reset()
-{
-    stopRecording();
-    stopPlayback();
-    setState(QAudio::AudioInput, QAudio::StoppedState);
-    setFormat(QAudioFormat());
+void Engine::resetAudioDevices()
+{
     delete m_audioInput;
     m_audioInput = 0;
@@ -443,9 +443,25 @@
     m_audioOutput = 0;
     setPlayPosition(0);
+    m_spectrumPosition = 0;
+    setLevel(0.0, 0.0, 0);
+}
+
+void Engine::reset()
+{
+    stopRecording();
+    stopPlayback();
+    setState(QAudio::AudioInput, QAudio::StoppedState);
+    setFormat(QAudioFormat());
+    m_generateTone = false;
+    delete m_file;
+    m_file = 0;
+    delete m_analysisFile;
+    m_analysisFile = 0;
     m_buffer.clear();
+    m_bufferPosition = 0;
+    m_bufferLength = 0;
     m_dataLength = 0;
-    m_spectrumPosition = 0;
-    emit dataDurationChanged(0);
-    setLevel(0.0, 0.0, 0);
+    emit dataLengthChanged(0);
+    resetAudioDevices();
 }
 
@@ -454,42 +470,42 @@
     bool result = false;
 
-    reset();
+    QAudioFormat format = m_format;
 
     if (selectFormat()) {
-        const qint64 bufferLength = audioLength(m_format, BufferDurationUs);
-        m_buffer.resize(bufferLength);
-        m_buffer.fill(0);
-        emit bufferDurationChanged(BufferDurationUs);
-
-        if (m_generateTone) {
-            if (0 == m_tone.endFreq) {
-                const qreal nyquist = nyquistFrequency(m_format);
-                m_tone.endFreq = qMin(qreal(SpectrumHighFreq), nyquist);
+        if (m_format != format) {
+            resetAudioDevices();
+            if (m_file) {
+                emit bufferLengthChanged(bufferLength());
+                emit dataLengthChanged(dataLength());
+                emit bufferChanged(0, 0, m_buffer);
+                setRecordPosition(bufferLength());
+                result = true;
+            } else {
+                m_bufferLength = audioLength(m_format, BufferDurationUs);
+                m_buffer.resize(m_bufferLength);
+                m_buffer.fill(0);
+                emit bufferLengthChanged(bufferLength());
+                if (m_generateTone) {
+                    if (0 == m_tone.endFreq) {
+                        const qreal nyquist = nyquistFrequency(m_format);
+                        m_tone.endFreq = qMin(qreal(SpectrumHighFreq), nyquist);
+                    }
+                    // Call function defined in utils.h, at global scope
+                    ::generateTone(m_tone, m_format, m_buffer);
+                    m_dataLength = m_bufferLength;
+                    emit dataLengthChanged(dataLength());
+                    emit bufferChanged(0, m_dataLength, m_buffer);
+                    setRecordPosition(m_bufferLength);
+                    result = true;
+                } else {
+                    emit bufferChanged(0, 0, m_buffer);
+                    m_audioInput = new QAudioInput(m_audioInputDevice, m_format, this);
+                    m_audioInput->setNotifyInterval(NotifyIntervalMs);
+                    result = true;
+                }
             }
-
-            // Call function defined in utils.h, at global scope
-            ::generateTone(m_tone, m_format, m_buffer);
-            m_dataLength = m_buffer.size();
-            emit dataDurationChanged(bufferDuration());
-            setRecordPosition(bufferDuration());
-            result = true;
-        } else if (m_file) {
-            const qint64 length = m_wavFile.readData(*m_file, m_buffer, m_format);
-            if (length) {
-                m_dataLength = length;
-                emit dataDurationChanged(dataDuration());
-                setRecordPosition(dataDuration());
-                result = true;
-            }
-        } else {
-            m_audioInput = new QAudioInput(m_audioInputDevice, m_format, this);
-            m_audioInput->setNotifyInterval(NotifyIntervalMs);
-            result = true;
-        }
-
-        m_audioOutput = new QAudioOutput(m_audioOutputDevice, m_format, this);
-        m_audioOutput->setNotifyInterval(NotifyIntervalMs);
-        m_spectrumLengthBytes = SpectrumLengthSamples *
-                (m_format.sampleSize() / 8) * m_format.channels();
+            m_audioOutput = new QAudioOutput(m_audioOutputDevice, m_format, this);
+            m_audioOutput->setNotifyInterval(NotifyIntervalMs);
+        }
     } else {
        if (m_file)
@@ -502,4 +518,6 @@
     }
 
+    ENGINE_DEBUG << "Engine::initialize" << "m_bufferLength" << m_bufferLength;
+    ENGINE_DEBUG << "Engine::initialize" << "m_dataLength" << m_dataLength;
     ENGINE_DEBUG << "Engine::initialize" << "format" << m_format;
 
@@ -511,19 +529,13 @@
     bool foundSupportedFormat = false;
 
-    if (m_file) {
-        // Header is read from the WAV file; just need to check whether
-        // it is supported by the audio output device
-        QAudioFormat format = m_wavFile.format();
-        if (m_audioOutputDevice.isFormatSupported(m_wavFile.format())) {
-            setFormat(m_wavFile.format());
+    if (m_file || QAudioFormat() != m_format) {
+        QAudioFormat format = m_format;
+        if (m_file)
+            // Header is read from the WAV file; just need to check whether
+            // it is supported by the audio output device
+            format = m_file->fileFormat();
+        if (m_audioOutputDevice.isFormatSupported(format)) {
+            setFormat(format);
             foundSupportedFormat = true;
-        } else {
-            // Try flipping mono <-> stereo
-            const int channels = (format.channels() == 1) ? 2 : 1;
-            format.setChannels(channels);
-            if (m_audioOutputDevice.isFormatSupported(format)) {
-                setFormat(format);
-                foundSupportedFormat = true;
-            }
         }
     } else {
@@ -649,10 +661,10 @@
     Q_UNUSED(length)
 #else
-    Q_ASSERT(position + length <= m_dataLength);
+    Q_ASSERT(position + length <= m_bufferPosition + m_dataLength);
 
     qreal peakLevel = 0.0;
 
     qreal sum = 0.0;
-    const char *ptr = m_buffer.constData() + position;
+    const char *ptr = m_buffer.constData() + position - m_bufferPosition;
     const char *const end = ptr + length;
     while (ptr < end) {
@@ -680,16 +692,16 @@
     Q_UNUSED(position)
 #else
-    Q_ASSERT(position + m_spectrumLengthBytes <= m_dataLength);
-    Q_ASSERT(0 == m_spectrumLengthBytes % 2); // constraint of FFT algorithm
+    Q_ASSERT(position + m_spectrumBufferLength <= m_bufferPosition + m_dataLength);
+    Q_ASSERT(0 == m_spectrumBufferLength % 2); // constraint of FFT algorithm
 
     // QThread::currentThread is marked 'for internal use only', but
     // we're only using it for debug output here, so it's probably OK :)
     ENGINE_DEBUG << "Engine::calculateSpectrum" << QThread::currentThread()
-                 << "count" << m_count << "pos" << position << "len" << m_spectrumLengthBytes
+                 << "count" << m_count << "pos" << position << "len" << m_spectrumBufferLength
                  << "spectrumAnalyser.isReady" << m_spectrumAnalyser.isReady();
 
     if(m_spectrumAnalyser.isReady()) {
-        m_spectrumBuffer = QByteArray::fromRawData(m_buffer.constData() + position,
-                                                   m_spectrumLengthBytes);
+        m_spectrumBuffer = QByteArray::fromRawData(m_buffer.constData() + position - m_bufferPosition,
+                                                   m_spectrumBufferLength);
         m_spectrumPosition = position;
         m_spectrumAnalyser.calculate(m_spectrumBuffer, m_format);
@@ -702,4 +714,7 @@
     const bool changed = (format != m_format);
     m_format = format;
+    m_levelBufferLength = audioLength(m_format, LevelWindowUs);
+    m_spectrumBufferLength = SpectrumLengthSamples *
+                            (m_format.sampleSize() / 8) * m_format.channels();
     if (changed)
         emit formatChanged(m_format);
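The core of this change is a move from microsecond durations to byte counts throughout the Engine API: dataDurationChanged becomes dataLengthChanged, m_spectrumLengthBytes becomes m_spectrumBufferLength, and the new m_levelBufferLength is computed once in setFormat(). The conversion helper audioLength() lives in utils.h, which this changeset does not touch, so the sketch below is only an illustration of the duration-to-bytes arithmetic the new code assumes, not the demo's actual implementation.

    // Hypothetical sketch: convert a duration in microseconds into a byte
    // count for the given PCM format, the kind of value now stored in
    // m_levelBufferLength and compared against m_dataLength above.
    #include <QAudioFormat>
    #include <QtGlobal>

    qint64 bytesForDuration(const QAudioFormat &format, qint64 microSeconds)
    {
        // bytes per second = sample rate * channels * bytes per sample
        const qint64 bytesPerSecond = qint64(format.frequency())
                                      * format.channels()
                                      * (format.sampleSize() / 8);
        qint64 result = (bytesPerSecond * microSeconds) / 1000000;
        // Align to a whole frame so a window never splits a sample
        result -= result % (format.channels() * (format.sampleSize() / 8));
        return result;
    }

As an example of the arithmetic, a 100 ms window of 16-bit stereo audio at 44100 Hz is 44100 * 2 * 2 * 0.1 = 17640 bytes.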
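When playback comes from a WavFile, the new audioNotify() no longer has the whole clip in m_buffer; it re-reads just enough of m_analysisFile to cover the level and spectrum windows behind the current play position. A simplified, self-contained sketch of that window arithmetic follows; the names are illustrative, and the demo additionally pads the read length by one waveform window (audioLength(m_format, WaveformWindowDuration)) before issuing the read.

    // Illustrative sketch of the analysis-window arithmetic in the new
    // Engine::audioNotify(): pick a byte range that covers both the level
    // window and the spectrum window ending at the play position.
    #include <QtGlobal>

    struct AnalysisWindow { qint64 pos; qint64 len; };

    AnalysisWindow analysisWindow(qint64 playPosition,
                                  qint64 levelBufferLength,
                                  qint64 spectrumBufferLength,
                                  qint64 fileSize)
    {
        const qint64 levelPosition    = playPosition - levelBufferLength;
        const qint64 spectrumPosition = playPosition - spectrumBufferLength;
        // Start at the earlier of the two windows, clamped to the file start
        const qint64 readPos = qMax(qint64(0), qMin(levelPosition, spectrumPosition));
        // Stop at the later window end, clamped to the end of the file
        const qint64 readEnd = qMin(fileSize,
                                    qMax(levelPosition + levelBufferLength,
                                         spectrumPosition + spectrumBufferLength));
        AnalysisWindow window;
        window.pos = readPos;
        window.len = readEnd - readPos;
        return window;
    }

Because levelPosition + levelBufferLength and spectrumPosition + spectrumBufferLength both equal playPosition, readEnd is simply playPosition clamped to the file size; keeping the two terms separate mirrors the structure of the code in the diff.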