Subsections

Realtime Audio and MIDI Input/Output

Blocking Functionality

Below is an example realtime audio output program that uses the RtWvOut class in a blocking context.
// rtsine.cpp
#include "SineWave.h"
#include "RtWvOut.h"
#include "Envelope.h"
#include "ADSR.h"
#include "FileWvOut.h"

using namespace stk;

int main()
{
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );
  Stk::showWarnings( true );
  int nFrames = 100000;
  int releaseCount = (int) (0.9 * nFrames);
  float rampTime = (nFrames - releaseCount) / Stk::sampleRate();

  try {

    SineWave sine;
    RtWvOut dac( 1 );  // Define and open the default realtime output device for one-channel playback

    /* If you wanted to also create an output soundfile (also uncomment output.tick() below)
       FileWvOut output;
       output.openFile( "rttest", 1, FileWrite::FILE_WAV, Stk::STK_SINT16 );
    */

    /* Use ADSR */
    ADSR env;
    env.keyOn();
    env.setAllTimes( rampTime, rampTime, 0.7, rampTime ); // Attack time, decay time, sustain level, release time


    /* Or use a linear line segment envelope (and comment-out the ADSR lines above)
       Envelope env;
       env.keyOn();
       env.setTime( rampTime ); // Attack and release time
    */

    sine.setFrequency( 440.0 );

    // Single-sample computations
    StkFloat temp;
    for ( int i=0; i<nFrames; i++ ) {
      temp = env.tick() * sine.tick();
      dac.tick( temp );
      //output.tick( temp );
      if ( i == releaseCount ) env.keyOff();
    }
  }
  catch ( StkError & ) {
    exit( 1 );
  }

  return 0;
}

This example can be compiled on a Macintosh OS-X system with the following syntax, assuming the file rtsine.cpp is in the working directory as described above:

g++ -std=c++11 -Istk/include/ -Lstk/src/ -D__MACOSX_CORE__ rtsine.cpp -lstk -lpthread -framework CoreAudio -framework CoreMIDI -framework CoreFoundation

This example can be compiled on a Windows system (using MinGW and MSYS) with the following syntax, assuming the file rtsine.cpp is in the working directory as described above:

g++ -std=c++11 -o rtsine -Istk/include/ -Lstk/src/ -D__WINDOWS_DS__ rtsine.cpp -lstk -lpthread -ldsound -lole32 -lwinmm

Callback Functionality

Below are two examples of realtime audio programs that use the RtAudio class in a callback context: the first produces output only, the second processes input through to output.
// rtex1.cpp
//
// Realtime audio output example using callback functionality.

#include "RtAudio.h"
#include <iostream>
#include <cmath>

// State shared with the audio callback: the oscillator's current phase
// (radians) and the phase advance per output sample.
struct CallbackData {
  double phase = 0.0;
  double phaseIncrement = 0.0;

  // Members are zero-initialized above; no custom construction needed.
  CallbackData() = default;
};

// Audio callback: fills outputBuffer with nBufferFrames mono samples of a
// 0.8-amplitude sinusoid, advancing the phase stored in dataPointer.
// Returning 0 tells RtAudio to continue the stream.
int sin( void *outputBuffer, void *, unsigned int nBufferFrames,
         double, RtAudioStreamStatus, void *dataPointer )
{
  // Cast the buffer and data pointer to the correct data type.
  double *my_data = (double *) outputBuffer;
  CallbackData *data = (CallbackData *) dataPointer;

  // We know we only have 1 sample per frame here.
  // Loop index is unsigned to match nBufferFrames (avoids a
  // signed/unsigned comparison warning).
  for ( unsigned int i=0; i<nBufferFrames; i++ ) {
    my_data[i] = 0.8 * std::sin( data->phase );
    data->phase += data->phaseIncrement;
    // Wrap the phase to keep it small and preserve precision over long runs.
    if ( data->phase > 2 * M_PI ) data->phase -= 2 * M_PI;
  }

  return 0;
}

int main()
{
  // Stream configuration: mono, 44.1 kHz, 256-frame buffers, 440 Hz tone.
  unsigned int nBufferFrames = 256;  // 256 sample frames
  unsigned int sampleRate = 44100;
  unsigned int nChannels = 1;
  double frequency = 440.0;
  CallbackData data;

  RtAudio dac;
  std::vector<unsigned int> deviceIds = dac.getDeviceIds();
  if ( deviceIds.size() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  // Setup sinusoidal parameter for callback
  data.phaseIncrement = 2 * M_PI * frequency / sampleRate;

  // Open the default realtime output device.
  // A true (non-zero) return from openStream() indicates an error;
  // getErrorText() describes it. NOTE(review): this is the RtAudio
  // error-return API (no exceptions) — confirm the RtAudio version in use.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = nChannels;
  if ( dac.openStream( &parameters, NULL, RTAUDIO_FLOAT64, sampleRate, &nBufferFrames, &sin, (void *)&data ) ) {
    std::cout << '\n' << dac.getErrorText() << '\n' << std::endl;
    exit( 0 ); // problem with device settings
  }

  // Stream is open ... now start it.
  if ( dac.startStream() ) {
    std::cout << dac.getErrorText() << std::endl;
    goto cleanup;  // stream is open but could not start; still close it
  }

  char input;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( input ); // block until user hits return

  // Block released ... stop the stream
  if ( dac.isStreamRunning() )
    dac.stopStream();  // or could call dac.abortStream();

  // Shared exit path: close the stream if it was successfully opened.
  cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();

  return 0;
}

This example can be compiled on a Macintosh OS-X system with the following syntax:

g++ -std=c++11 -Istk/include/ -Lstk/src/ -D__MACOSX_CORE__ rtex1.cpp -lstk -lpthread -framework CoreAudio -framework CoreMIDI -framework CoreFoundation

The previous example can be easily modified to take realtime input from a soundcard, pass it through a comb filter, and send it back out to the soundcard as follows:

// rtex2.cpp
//
// Realtime audio input/output example with comb filter using callback functionality.
#include "RtAudio.h"
#include <iostream>
#include "Delay.h"
using namespace stk;

// Audio callback: reads nBufferFrames mono input samples, applies a
// feedback comb filter (via the Delay instance passed in dataPointer),
// and writes the result to the output buffer.
// Returning 0 tells RtAudio to continue the stream.
int comb( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
          double, RtAudioStreamStatus, void *dataPointer )
{
  // Cast the buffers to the correct data type.
  double *idata = (double *) inputBuffer;
  double *odata = (double *) outputBuffer;
  Delay *delay = (Delay *) dataPointer;

  // We know we only have 1 sample per frame here.
  // Loop index is unsigned to match nBufferFrames (avoids a
  // signed/unsigned comparison warning).
  for ( unsigned int i=0; i<nBufferFrames; i++ ) {
    odata[i] = idata[i] + 0.99 * delay->lastOut(); // feedback comb
//    odata[i] = idata[i] + delay->tick( idata[i] ); // feedforward comb
    odata[i] *= 0.45;         // scale down to keep the feedback loop from clipping
    delay->tick( odata[i] );  // feedback comb
  }

  return 0;
}

int main()
{
  // Stream configuration: mono duplex at 48 kHz with 256-frame buffers.
  unsigned int nBufferFrames = 256;  // 256 sample frames
  unsigned int sampleRate = 48000;
  unsigned int nChannels = 1;

  RtAudio adac;
  std::vector<unsigned int> deviceIds = adac.getDeviceIds();
  if ( deviceIds.size() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  // 10000-sample delay line (maximum length equal to initial length).
  Delay delay( 10000, 10000 );

  // Open the default realtime input/output device.
  // A true (non-zero) return from openStream() indicates an error;
  // getErrorText() describes it. NOTE(review): this is the RtAudio
  // error-return API (no exceptions) — confirm the RtAudio version in use.
  RtAudio::StreamParameters oParameters, iParameters;
  oParameters.deviceId = adac.getDefaultOutputDevice();
  iParameters.deviceId = adac.getDefaultInputDevice();
  oParameters.nChannels = nChannels;
  iParameters.nChannels = nChannels;
  if ( adac.openStream( &oParameters, &iParameters, RTAUDIO_FLOAT64, sampleRate, &nBufferFrames, &comb, (void *)&delay ) ) {
    std::cout << '\n' << adac.getErrorText() << '\n' << std::endl;
    exit( 0 ); // problem with device settings
  }

  // Stream is open ... now start it.
  if ( adac.startStream() ) {
    std::cout << adac.getErrorText() << std::endl;
    goto cleanup;  // stream is open but could not start; still close it
  }

  char input;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( input ); // block until user hits return

  // Block released ... stop the stream
  if ( adac.isStreamRunning() )
    adac.stopStream();  // or could call adac.abortStream();

  // Shared exit path: close the stream if it was successfully opened.
  cleanup:
  if ( adac.isStreamOpen() ) adac.closeStream();

  return 0;
}



©2004-2024 McGill University. All Rights Reserved.
Maintained by Gary P. Scavone.