際際滷

際際滷Share a Scribd company logo
Audio DSP
In ReasonML
Hi, I'm Ken
@ken_wheeler
AUDIO DSP
DIGITAL SIGNAL
PROCESSING
 Audio Processing/Synthesis

 Statistics

 Medical Data

 Image/Video Processing

 ETC
DIGITAL SIGNAL
PROCESSING
WE'RE GONNA
BUILD A SYNTH
How do you create
sound, digitally?
[ ]
 The number of samples (data points) of audio per second

 The granularity/resolution of your signal

 44.1kHz / 44100Hz = 44100 samples per second
Sample Rate
Audio DSP in ReasonML
 The number of samples for the sound card to process at one
time

 Buffer === too small, computer can't fill the buffers fast
enough, get glitching

 Buffer === too big, can experience latency in real-time
performance (i.e.: pressing a key on a MIDI keyboard)
Buffer Size
[ ]
[ ]
Sample Rate: 44100
Buffer Size: 1024
Sample Rate: 44100
Buffer Size: 1024
Sample Rate: 44100
Buffer Size: 1024
43.06 per second
Playing native audio with
ReasonML
Audio DSP in ReasonML
/* Initialize PortAudio and stream interleaved stereo float32 buffers
   to the system's default output device until `playing` becomes false. */
open Portaudio;
open Bigarray;

Portaudio.init();

let deviceId = Portaudio.get_default_output_device();
let device = Portaudio.get_device_info(deviceId);

/* Output stream parameters: stereo, 32-bit float samples, low latency. */
let outparam =
  Some({
    channels: 2,
    device: deviceId,
    sample_format: format_float32,
    latency: device.d_default_low_output_latency,
  });

let stream = open_stream(None, outparam, sampleRate, bufferSize, []);

/* Interleaved stereo: 2 floats (left/right) per frame. */
let dims = [|2 * bufferSize|];
let ba = Genarray.create(float32, c_layout, dims);

/* Audio loop: refill the buffer, then hand it to the sound card. */
while (playing^ === true) {
  fill_ba(ba, dispatch, appStateRef^);
  Portaudio.write_stream_ba(stream, ba, 0, bufferSize);
};
FILLING OUR
BUFFERS
/* Running playback time (seconds) and the time advanced per sample. */
let mtime = ref(0.0);
let mdelta = 1. /. sampleRate;

/* Fill the interleaved stereo buffer `ba` with one data point per frame.
   NOTE(review): the slide's body used a free `i`; the per-frame loop it
   elided is restored here — confirm against the full source. */
let fill_ba = ba => {
  for (i in 0 to bufferSize - 1) {
    /* Reset mtime before it grows too large to stay precise. */
    if (mtime^ > Float.max_float) {
      mtime := 0.;
    };
    /* Get the audio data point for the current time. */
    let data = getData(mtime);
    /* Advance time by one sample. */
    mtime := mtime^ +. mdelta;
    /* Interleaved stereo indexes: even = left channel, odd = right. */
    let left = [|2 * i|];
    let right = [|2 * i + 1|];
    /* Write the same data point to both channels. */
    Genarray.set(ba, left, data);
    Genarray.set(ba, right, data);
  };
};
WAVEFORMS
Audio DSP in ReasonML
OSCILLATORS
Oscillators emit waveform
values over time
Frequency (pitch) determines
how fast it oscillates
Audio DSP in ReasonML
Time determines where in
the cycle (period) you are
(phase)
Audio DSP in ReasonML
Period: One full oscillation cycle
Let's make a sine
wave
/* Sine oscillator: sample a sine wave at time `mtime` (seconds) for the
   given frequency (Hz), applying a phase offset (radians) and gain. */
let getSine = (frequency, gain, phase, mtime) =>
  sin(Float.pi *. 2. *. frequency *. mtime +. phase) *. gain;
Gain: multiplier that makes
the signal louder or softer
(volume)
Square Wave
/* Square oscillator: +gain for the first half of each period, -gain for
   the second half. Fixes the slide's undefined `freq` (param is
   `frequency`) and adds the body braces the multi-statement body needs.
   fpt = full period time, hpt = half period time, lt = time within period. */
let getSquare = (frequency, gain, mtime) => {
  let fpt = 1. /. frequency;
  let hpt = fpt /. 2.;
  let lt = mod_float(mtime, fpt);
  (lt < hpt ? 1.0 : (-1.0)) *. gain;
};
Saw Wave
/* Saw oscillator: ramps linearly from -gain to +gain over each period.
   Fixes the slide's undefined `freq` (param is `frequency`), drops the
   unused half-period local, and adds the required body braces. */
let getSaw = (frequency, gain, mtime) => {
  let fpt = 1. /. frequency; /* full period time */
  let lt = mod_float(mtime, fpt); /* time within the current period */
  (lt /. fpt *. 2. -. 1.0) *. gain;
};
How can we combine two
waves to make a new
sound?
You literally add them
together.
letdataPoint=sineWaveDataPoint
+sawWaveDataPoint;
So how do we shape our
sound?
ENVELOPE
Envelopes change amplitude
over time to shape a signal
Attack - Initial ramp up of amplitude after Note On
Decay - Initial step down of amplitude
Sustain - Holds amplitude steady
Release - How long it takes for the amplitude to fade
out after Note Off
We can make an envelope
with a state machine
/* Envelope state-machine stages, in playback order. */
type stage =
  | Off
  | Attack
  | Decay
  | Sustain
  | Release;
/* Mutable envelope state. Levels are linear amplitude multipliers;
   sample indexes count samples within the current stage. */
type envelope = {
  minimumLevel: float, /* floor that keeps the level from reaching 0 */
  mutable currentStage: stage,
  mutable currentLevel: float, /* current amplitude multiplier */
  mutable multiplier: float, /* applied to the level once per sample */
  mutable currentSampleIndex: int,
  mutable nextStageSampleIndex: int, /* sample count at which to advance */
};
/* Fresh envelope: Off stage, level at the minimum. */
let create = () => {
  minimumLevel: 0.0001,
  currentStage: Off,
  currentLevel: 0.0001,
  multiplier: 0.1,
  currentSampleIndex: 0,
  nextStageSampleIndex: 0,
};
/* Transition the envelope into `nextStage`.
   `params` holds per-stage settings indexed via `find(stageOrder, ...)`
   (durations in seconds for timed stages; params[3] is presumably the
   sustain level — confirm against the caller). The transcription of this
   slide duplicated the level/multiplier switch and corrupted the first
   Release branch; this is the reconstructed single version. */
let enterStage = (env, nextStage, params) => {
  env.currentStage = nextStage;
  env.currentSampleIndex = 0;
  /* Off and Sustain hold indefinitely; timed stages convert their
     duration (seconds) into a sample count. */
  env.nextStageSampleIndex = (
    switch (nextStage) {
    | Off
    | Sustain => 0
    | _ =>
      let idx = find(stageOrder, nextStage, ~i=0);
      int_of_float(params[idx] *. sampleRate);
    }
  );
  /* Set the stage's starting level and per-sample multiplier. */
  switch (nextStage) {
  | Off =>
    env.currentLevel = 0.;
    env.multiplier = 1.0;
  | Attack =>
    /* Ramp from the minimum level up to full amplitude. */
    env.currentLevel = env.minimumLevel;
    calculateMultiplier(env, env.currentLevel, 1.0, env.nextStageSampleIndex);
  | Decay =>
    /* Ramp from full amplitude down to the sustain level. */
    env.currentLevel = 1.0;
    calculateMultiplier(
      env,
      env.currentLevel,
      max(params[3], env.minimumLevel),
      env.nextStageSampleIndex,
    );
  | Sustain =>
    env.currentLevel = params[3];
    env.multiplier = 1.0;
  | Release =>
    /* Ramp from wherever we are down to the minimum level. */
    calculateMultiplier(
      env,
      env.currentLevel,
      env.minimumLevel,
      env.nextStageSampleIndex,
    )
  };
  ();
};
/* Advance the envelope by one sample and return the current level.
   Off and Sustain hold steady; timed stages multiply the level once per
   sample and advance to the next stage when their sample count is up. */
let nextSample = (env, params) => {
  switch (env.currentStage) {
  | Off
  | Sustain => ()
  | _ =>
    /* Reached the end of this stage? Move to the next in the cycle. */
    if (env.currentSampleIndex == env.nextStageSampleIndex) {
      let currentStageIndex = find(stageOrder, env.currentStage, ~i=0);
      let newStage = (currentStageIndex + 1) mod 5;
      enterStage(env, stageOrder[newStage], params);
    };
    /* Exponential ramp: apply the per-sample multiplier. */
    env.currentLevel = env.currentLevel *. env.multiplier;
    env.currentSampleIndex = env.currentSampleIndex + 1;
  };
  env.currentLevel;
};
Audio DSP in ReasonML
Envelopes are typically
triggered by MIDI or a
sequencer
I don't have PortMIDI bindings, so I
built a sequencer:
Step Sequencer
Tempo (Beats Per Minute)
Beat (1/4 Note)
Step (1/16th Note)
How can we sequence our
sounds?
/* 16-step sequence: 1 = trigger the envelope on that 1/16th-note step. */
let steps = [|1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0|];
let currentSample = ref(0.0);

/* Per-buffer callback: fire the envelope's Attack on each active step.
   A step is a 1/16th note, i.e. a quarter of a beat at `tempo` BPM.
   Fixes the slide's undefined `beatS`/`stepSec` (they are the
   `beatSeconds`/`stepSeconds` locals defined just above). */
let fill_ba = ba => {
  let beatMs = 60000. /. tempo;
  let beatSeconds = beatMs /. 1000.;
  let stepSeconds = beatSeconds /. 4.;
  /* Length of one step, in samples. */
  let step = floor(stepSeconds *. sampleRate);
  if (mod_float(currentSample^, step) == 0.0) {
    /* Current sequencer step for this time. */
    let stepIndex = int_of_float(currentSample^ /. step);
    if (stepIndex !== 0 && steps[stepIndex - 1] === 1) {
      Envelope.enterStage(env, Attack, params);
    };
  };
  /* Wrap after 16 steps so the sequence loops. */
  if (currentSample^ > step *. 16.) {
    currentSample := 0.0;
  };
};
How about some FX?
How to make a bitcrusher
 A bitcrusher is a combination of adjusting bit depth and
downsampling

 Bit depth - the resolution of our waves

 Downsampling - reducing the sample rate
Bitcrusher
Bit Depth
Downsampling
/* 16-bit depth, keep 1 of every 12 samples. */
let bitcrusher = Bitcrusher.create(16., 12);
let data = (getWaveData() *. gain) |> Bitcrusher.process(bitcrusher);
/* Mutable bitcrusher state. */
type bitcrusher = {
  mutable bitDepth: float, /* target resolution, in bits */
  mutable downSampling: int, /* recompute 1 of every N samples */
  mutable lastSample: float, /* last crushed sample, held while skipping */
  mutable counter: int, /* samples since the last recomputation */
};
/* Bitcrusher with the given bit depth and downsampling factor. */
let create = (bitDepth, downSampling) => {
  bitDepth,
  downSampling,
  lastSample: 0.,
  counter: 0,
};
/* Quantize `v` (expected in [-1, 1]) to `bitDepth` bits of resolution. */
let bitcrush = (v, bitDepth) => {
  let bd = bitDepth -. 1.;
  /* Scale into the signed integer range and round down. */
  let i = ref(floor(v *. (-2.) ** bd *. (-1.)));
  /* Clamp to the representable integer range. */
  if (v >= 1.0) {
    i := 2. ** bd -. 1.;
  } else if (v <= (-1.0)) {
    i := (-2.) ** bd;
  };
  /* Scale back down to [-1, 1]. */
  let out = i^ /. (-2.) ** bd *. (-1.);
  out;
};
/* Crush `input`: recompute every `downSampling`-th sample; otherwise
   hold (repeat) the last crushed sample, which is the downsampling. */
let process = (b: bitcrusher, input: float) => {
  b.counter = b.counter + 1;
  let out = ref(0.);
  if (b.counter < b.downSampling) {
    /* Still skipping: repeat the previous crushed sample. */
    out := b.lastSample;
  } else {
    b.counter = 0;
    let sample = bitcrush(input, b.bitDepth);
    b.lastSample = sample;
    out := sample;
  };
  out^;
};
How to make a delay
Delay
(It's basically an echo)
/* Signal chain: oscillator -> gain -> bitcrusher -> delay. */
let bitcrusher = Bitcrusher.create(16., 12);
let data =
  (getWaveData() *. gain)
  |> Bitcrusher.process(bitcrusher)
  |> Delay.process(delay);
Signal Chain
/* Mutable delay (echo) state backed by a circular buffer. */
type delay = {
  mutable duration: float, /* delay time — presumably a fraction of the 2 s buffer; verify against process */
  mutable gain: float, /* echo volume */
  delayBuffer: array(float), /* circular buffer of scheduled samples */
  mutable currentSample: int, /* current read/write position */
};
/* Delay with a silent 2-second circular buffer. */
let create = (duration, gain) => {
  duration,
  gain,
  delayBuffer: Array.make(int_of_float(sampleRate *. 2.), 0.),
  currentSample: 0,
};
Delay Buffer
We use a circular buffer to schedule the
current signal to play at a later time
/* Mix the delayed signal into `input` and schedule `input` to replay
   `duration` worth of the 2-second buffer later. */
let process = (d: delay, input: float) => {
  /* Max buffer index. */
  let max = int_of_float(sampleRate *. 2.) - 1;
  /* If we're at the end of the sample count, reset. */
  if (d.currentSample > max) {
    d.currentSample = 0;
  };
  /* How many samples we are delaying by. */
  let delayIndex = int_of_float(sampleRate *. 2. *. d.duration);
  /* Candidate insert index. */
  let ii = d.currentSample + delayIndex;
  /* If the index exceeds the buffer length, wrap to the beginning
     with the difference. */
  let insertIndex = ii < max + 1 ? ii : ii mod max;
  /* Schedule the current input for later playback. */
  d.delayBuffer[insertIndex] = input;
  /* Mix the delayed sample into the live signal. */
  let output = input +. d.delayBuffer[d.currentSample] *. d.gain;
  /* Increment the sample counter. */
  d.currentSample = d.currentSample + 1;
  output;
};
DEMO TIME

More Related Content

Audio DSP in ReasonML