Tweak DSP optimization.

Jonathan Moore Liles 2013-09-08 13:51:05 -07:00
parent fc5c59c61d
commit 00ed29d6c2
8 changed files with 259 additions and 246 deletions

View File

@@ -108,7 +108,15 @@ AUX_Module::handle_sample_rate_change ( nframes_t n )
void
AUX_Module::process ( nframes_t nframes )
{
if ( !bypass() )
if ( unlikely( bypass() ) )
{
for ( unsigned int i = 0; i < audio_input.size(); ++i )
{
if ( audio_input[i].connected() )
buffer_fill_with_silence( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), nframes );
}
}
else
{
float gt = DB_CO( control_input[0].control_value() );
@@ -116,9 +124,8 @@ AUX_Module::process ( nframes_t nframes )
bool use_gainbuf = smoothing.apply( gainbuf, nframes, gt );
if ( use_gainbuf )
if ( unlikely( use_gainbuf ) )
{
for ( unsigned int i = 0; i < audio_input.size(); ++i )
{
if ( audio_input[i].connected() )
@@ -135,14 +142,6 @@ AUX_Module::process ( nframes_t nframes )
}
}
}
else
{
for ( unsigned int i = 0; i < audio_input.size(); ++i )
{
if ( audio_input[i].connected() )
buffer_fill_with_silence( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), nframes );
}
}
}
void
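The AUX_Module change above inverts the branch order: the rare bypass case is tested first and hinted with unlikely(), the normal signal path becomes the fall-through, and connected aux outputs are filled with silence while bypassed. A minimal self-contained sketch of that shape; process_block() and its arguments are illustrative stand-ins, not the module's real API.

#include <string.h>

typedef float sample_t;
typedef unsigned nframes_t;

#ifndef unlikely
#define unlikely(x) __builtin_expect(x,0)
#endif

static void
process_block ( sample_t *buf, nframes_t nframes, float gain, bool bypassed )
{
    if ( unlikely( bypassed ) )
    {
        /* cold path: emit silence rather than stale samples */
        memset( buf, 0, nframes * sizeof(sample_t) );
        return;
    }

    /* hot path: the straight-line fall-through the branch hint optimizes for */
    for ( nframes_t i = 0; i < nframes; i++ )
        buf[i] *= gain;
}

int
main ( void )
{
    sample_t buf[4] = { 0.25f, -0.5f, 1.0f, -1.0f };

    process_block( buf, 4, 0.5f, false );   /* halves every sample */
    process_block( buf, 4, 0.5f, true );    /* bypassed: zeroes the block */
    return 0;
}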

View File

@@ -105,30 +105,38 @@ Gain_Module::handle_sample_rate_change ( nframes_t n )
void
Gain_Module::process ( nframes_t nframes )
{
const float gt = DB_CO( control_input[1].control_value() ? -90.f : control_input[0].control_value() );
sample_t gainbuf[nframes];
bool use_gainbuf = smoothing.apply( gainbuf, nframes, gt );
if ( use_gainbuf )
if ( unlikely( bypass() ) )
{
for ( int i = audio_input.size(); i--; )
{
if ( audio_input[i].connected() && audio_output[i].connected() )
{
sample_t *out = (sample_t*)audio_input[i].buffer();
buffer_apply_gain_buffer( out, gainbuf, nframes );
}
}
/* nothing to do */
}
else
for ( int i = audio_input.size(); i--; )
{
const float gt = DB_CO( control_input[1].control_value() ? -90.f : control_input[0].control_value() );
sample_t gainbuf[nframes];
bool use_gainbuf = smoothing.apply( gainbuf, nframes, gt );
if ( unlikely( use_gainbuf ) )
{
if ( audio_input[i].connected() && audio_output[i].connected() )
for ( int i = audio_input.size(); i--; )
{
buffer_apply_gain( (sample_t*)audio_input[i].buffer(), nframes, gt );
if ( audio_input[i].connected() && audio_output[i].connected() )
{
sample_t *out = (sample_t*)audio_input[i].buffer();
buffer_apply_gain_buffer( out, gainbuf, nframes );
}
}
}
else
for ( int i = audio_input.size(); i--; )
{
if ( audio_input[i].connected() && audio_output[i].connected() )
{
buffer_apply_gain( (sample_t*)audio_input[i].buffer(), nframes, gt );
}
}
}
}
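Gain_Module::process() now does nothing when bypassed and only computes the gain target on the active path. DB_CO() is presumably a decibel-to-linear-coefficient conversion, i.e. 10^(dB/20); the macro itself is not part of this diff, so the stand-in below is an assumption, with the hard mute at -90 dB mirroring the mute shortcut in the control_input[1] test above.

#include <math.h>
#include <stdio.h>

/* Hedged stand-in for DB_CO(): 10^(dB/20), with anything at or below
 * -90 dB snapped to a hard zero. */
static inline float
db_to_coefficient ( float dB )
{
    return dB > -90.0f ? powf( 10.0f, dB * 0.05f ) : 0.0f;
}

int
main ( void )
{
    printf( "  0 dB -> %f\n", db_to_coefficient( 0.0f ) );   /* 1.000000 */
    printf( " -6 dB -> %f\n", db_to_coefficient( -6.0f ) );  /* ~0.501187 */
    printf( "-90 dB -> %f\n", db_to_coefficient( -90.0f ) ); /* 0.000000, mute */
    return 0;
}

smoothing.apply( gainbuf, nframes, gt ) then either fills gainbuf with a per-sample ramp toward that coefficient (use_gainbuf true, the rarer case) or signals that the target is already reached, so a single constant multiply per connected channel suffices.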

View File

@@ -174,14 +174,11 @@ Meter_Module::process ( nframes_t nframes )
{
for ( unsigned int i = 0; i < audio_input.size(); ++i )
{
if ( audio_input[i].connected() )
{
// float dB = 20 * log10( get_peak_sample( (float*)audio_input[i].buffer(), nframes ) / 2.0f );
float dB = 20 * log10( buffer_get_peak( (sample_t*) audio_input[i].buffer(), nframes ) );
float dB = 20 * log10( buffer_get_peak( (sample_t*) audio_input[i].buffer(), nframes ) );
((float*)control_output[0].buffer())[i] = dB;
if (dB > control_value[i])
control_value[i] = dB;
}
((float*)control_output[0].buffer())[i] = dB;
if (dB > control_value[i])
control_value[i] = dB;
}
}
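The meter hunk drops the per-channel connected() test and the stale get_peak_sample() comment; the conversion itself is unchanged: a block's peak amplitude maps to dBFS as 20 * log10(peak), and the largest value seen so far is kept in control_value[i]. A tiny worked example of the conversion.

#include <math.h>
#include <stdio.h>

/* Peak sample amplitude (1.0 = full scale) to dBFS. */
static inline float
peak_to_dBFS ( float peak )
{
    return 20.0f * log10f( peak );   /* tends to -inf as the block goes silent */
}

int
main ( void )
{
    printf( "peak 1.00 -> %7.2f dBFS\n", peak_to_dBFS( 1.00f ) ); /*    0.00 */
    printf( "peak 0.50 -> %7.2f dBFS\n", peak_to_dBFS( 0.50f ) ); /*   -6.02 */
    printf( "peak 0.10 -> %7.2f dBFS\n", peak_to_dBFS( 0.10f ) ); /*  -20.00 */
    return 0;
}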

View File

@@ -80,49 +80,42 @@ Mono_Pan_Module::configure_inputs ( int )
void
Mono_Pan_Module::process ( nframes_t nframes )
{
if ( audio_input[0].connected() &&
audio_output[0].connected() &&
audio_output[1].connected() )
if ( unlikely( bypass() ) )
{
if ( bypass() )
buffer_copy( (sample_t*)audio_output[1].buffer(), (sample_t*)audio_input[0].buffer(), nframes );
}
else
{
const float gt = (control_input[0].control_value() + 1.0f) * 0.5f;
sample_t gainbuf[nframes];
bool use_gainbuf = smoothing.apply( gainbuf, nframes, gt );
if ( unlikely( use_gainbuf ) )
{
buffer_copy( (sample_t*)audio_output[1].buffer(), (sample_t*)audio_input[0].buffer(), nframes );
/* right channel */
buffer_copy_and_apply_gain_buffer( (sample_t*)audio_output[1].buffer(),
(sample_t*)audio_input[0].buffer(),
gainbuf,
nframes );
/* left channel */
for ( nframes_t i = 0; i < nframes; i++ )
gainbuf[i] = 1.0f - gainbuf[i];
buffer_apply_gain_buffer( (sample_t*)audio_output[0].buffer(), gainbuf, nframes );
}
else
{
const float gt = (control_input[0].control_value() + 1.0f) * 0.5f;
/* right channel */
buffer_copy_and_apply_gain( (sample_t*)audio_output[1].buffer(),
(sample_t*)audio_input[0].buffer(),
nframes,
gt );
if ( ! smoothing.target_reached( gt ) )
{
sample_t gainbuf[nframes];
smoothing.apply( gainbuf, nframes, gt );
/* right channel */
buffer_copy_and_apply_gain_buffer( (sample_t*)audio_output[1].buffer(),
(sample_t*)audio_input[0].buffer(),
gainbuf,
nframes );
/* left channel */
for ( nframes_t i = 0; i < nframes; i++ )
gainbuf[i] = 1.0f - gainbuf[i];
buffer_apply_gain_buffer( (sample_t*)audio_output[0].buffer(), gainbuf, nframes );
}
else
{
/* right channel */
buffer_copy_and_apply_gain( (sample_t*)audio_output[1].buffer(),
(sample_t*)audio_input[0].buffer(),
nframes,
gt );
/* left channel */
buffer_apply_gain( (sample_t*)audio_output[0].buffer(), nframes, 1.0f - gt);
}
/* left channel */
buffer_apply_gain( (sample_t*)audio_output[0].buffer(), nframes, 1.0f - gt);
}
}
}
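Mono_Pan_Module now handles bypass with a plain buffer copy and otherwise applies a linear pan law: the control value in [-1, 1] becomes a right-channel gain gt = (pan + 1) / 2, with 1 - gt on the left, so a centered pan sends 0.5 to each side. A self-contained sketch; function and argument names are illustrative only.

#include <stdio.h>

typedef float sample_t;
typedef unsigned nframes_t;

/* Linear pan: -1 = hard left, +1 = hard right. */
static void
pan_mono_to_stereo ( const sample_t *in, sample_t *left, sample_t *right,
                     nframes_t nframes, float pan )
{
    const float gt = ( pan + 1.0f ) * 0.5f;

    for ( nframes_t i = 0; i < nframes; i++ )
    {
        right[i] = in[i] * gt;
        left[i]  = in[i] * ( 1.0f - gt );
    }
}

int
main ( void )
{
    const sample_t in[1] = { 1.0f };
    sample_t l[1], r[1];

    pan_mono_to_stereo( in, l, r, 1, 0.0f );
    printf( "centered:   L=%.2f R=%.2f\n", l[0], r[0] ); /* 0.50 / 0.50 */

    pan_mono_to_stereo( in, l, r, 1, 0.5f );
    printf( "half right: L=%.2f R=%.2f\n", l[0], r[0] ); /* 0.25 / 0.75 */
    return 0;
}

When the pan target has just moved, the module ramps gt across the block through its smoothing object rather than using the constant, which is what the gainbuf branch above does to avoid zipper noise.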

View File

@@ -795,12 +795,7 @@ Plugin_Module::process ( nframes_t nframes )
{
handle_port_connection_change();
if ( !bypass() )
{
for ( unsigned int i = 0; i < _idata->handle.size(); ++i )
_idata->descriptor->run( _idata->handle[i], nframes );
}
else
if ( unlikely( bypass() ) )
{
/* If this is a mono to stereo plugin, then duplicate the input channel... */
/* There's not much we can do to automatically support other configurations. */
@@ -808,9 +803,16 @@ Plugin_Module::process ( nframes_t nframes )
{
buffer_copy( (sample_t*)audio_output[1].buffer(), (sample_t*)audio_input[0].buffer(), nframes );
}
}
_latency = get_plugin_latency();
_latency = 0;
}
else
{
for ( unsigned int i = 0; i < _idata->handle.size(); ++i )
_idata->descriptor->run( _idata->handle[i], nframes );
_latency = get_plugin_latency();
}
}
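The Plugin_Module change makes a bypassed plugin report zero latency instead of its normal processing latency, and keeps duplicating the input to the second output for mono-to-stereo plugins so downstream ports still receive both channels. A hedged sketch of the latency side; run_instances() and plugin_latency() are invented stand-ins, not the real calls.

#include <stdio.h>

typedef unsigned nframes_t;

#ifndef unlikely
#define unlikely(x) __builtin_expect(x,0)
#endif

/* Hypothetical stand-ins for the real plugin machinery. */
static void      run_instances ( void )  { /* run() each plugin instance */ }
static nframes_t plugin_latency ( void ) { return 64; /* pretend look-ahead */ }

static nframes_t
process_and_report_latency ( bool bypassed )
{
    if ( unlikely( bypassed ) )
        return 0;   /* audio skips the plugin, so it adds no delay */

    run_instances();
    return plugin_latency();
}

int
main ( void )
{
    printf( "active:   %u frames\n", process_and_report_latency( false ) ); /* 64 */
    printf( "bypassed: %u frames\n", process_and_report_latency( true ) );  /* 0  */
    return 0;
}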

View File

@@ -595,169 +595,166 @@ Spatializer_Module::draw ( void )
void
Spatializer_Module::process ( nframes_t nframes )
{
if ( !bypass() )
{
float azimuth = control_input[0].control_value();
float elevation = control_input[1].control_value();
float radius = control_input[2].control_value();
float highpass_freq = control_input[3].control_value();
float width = control_input[4].control_value();
float angle = control_input[5].control_value();
float azimuth = control_input[0].control_value();
float elevation = control_input[1].control_value();
float radius = control_input[2].control_value();
float highpass_freq = control_input[3].control_value();
float width = control_input[4].control_value();
float angle = control_input[5].control_value();
// bool more_options = control_input[6].control_value();
bool speed_of_sound = control_input[7].control_value() > 0.5f;
float late_gain = DB_CO( control_input[8].control_value() );
float early_gain = DB_CO( control_input[9].control_value() );
bool speed_of_sound = control_input[7].control_value() > 0.5f;
float late_gain = DB_CO( control_input[8].control_value() );
float early_gain = DB_CO( control_input[9].control_value() );
control_input[3].hints.visible = highpass_freq != 0.0f;
control_input[3].hints.visible = highpass_freq != 0.0f;
float delay_seconds = 0.0f;
float delay_seconds = 0.0f;
if ( speed_of_sound && radius > 1.0f )
delay_seconds = ( radius - 1.0f ) / 340.29f;
if ( speed_of_sound && radius > 1.0f )
delay_seconds = ( radius - 1.0f ) / 340.29f;
/* direct sound follows inverse square law */
/* but it's just the inverse as far as SPL goes */
/* direct sound follows inverse square law */
/* but it's just the inverse as far as SPL goes */
/* let's not go nuts... */
if ( radius < 0.01f )
radius = 0.01f;
/* let's not go nuts... */
if ( radius < 0.01f )
radius = 0.01f;
float gain = 1.0f / radius;
float gain = 1.0f / radius;
/* float cutoff_frequency = gain * LOWPASS_FREQ; */
/* float cutoff_frequency = gain * LOWPASS_FREQ; */
sample_t gainbuf[nframes];
sample_t delaybuf[nframes];
sample_t gainbuf[nframes];
sample_t delaybuf[nframes];
bool use_gainbuf = false;
bool use_delaybuf = delay_smoothing.apply( delaybuf, nframes, delay_seconds );
bool use_gainbuf = false;
bool use_delaybuf = delay_smoothing.apply( delaybuf, nframes, delay_seconds );
for ( unsigned int i = 0; i < audio_input.size(); i++ )
{
sample_t *buf = (sample_t*) audio_input[i].buffer();
for ( unsigned int i = 0; i < audio_input.size(); i++ )
{
sample_t *buf = (sample_t*) audio_input[i].buffer();
/* frequency effects */
_highpass[i]->run_highpass( buf, highpass_freq, nframes );
/* frequency effects */
_highpass[i]->run_highpass( buf, highpass_freq, nframes );
/* send to late reverb */
if ( i == 0 )
buffer_copy( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), buf, nframes );
else
buffer_mix( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), buf, nframes );
}
{
use_gainbuf = late_gain_smoothing.apply( gainbuf, nframes, late_gain );
/* gain effects */
if ( use_gainbuf )
buffer_apply_gain_buffer( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), gainbuf, nframes );
else
buffer_apply_gain( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), nframes, late_gain );
}
float early_angle = azimuth - angle;
if ( early_angle > 180.0f )
early_angle = -180 - ( early_angle - 180 );
else if ( early_angle < -180.0f )
early_angle = 180 - ( early_angle + 180 );
/* send to early reverb */
if ( audio_input.size() == 1 )
{
_early_panner->run_mono( (sample_t*)audio_input[0].buffer(),
(sample_t*)aux_audio_output[1].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[2].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[3].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[4].jack_port()->buffer(nframes),
azimuth + angle,
elevation,
nframes );
}
/* send to late reverb */
if ( i == 0 )
buffer_copy( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), buf, nframes );
else
{
_early_panner->run_stereo( (sample_t*)audio_input[0].buffer(),
(sample_t*)audio_input[1].buffer(),
(sample_t*)aux_audio_output[1].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[2].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[3].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[4].jack_port()->buffer(nframes),
azimuth + angle,
elevation,
width,
nframes );
}
buffer_mix( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), buf, nframes );
{
use_gainbuf = early_gain_smoothing.apply( gainbuf, nframes, early_gain );
}
for ( int i = 1; i < 5; i++ )
{
/* gain effects */
if ( use_gainbuf )
buffer_apply_gain_buffer( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), gainbuf, nframes );
else
buffer_apply_gain( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), nframes, early_gain );
}
}
{
use_gainbuf = late_gain_smoothing.apply( gainbuf, nframes, late_gain );
float corrected_angle = fabs( angle ) - (fabs( width ) * 0.5f);
if ( corrected_angle < 0.0f )
corrected_angle = 0.0f;
float cutoff_frequency = ( 1.0f / ( 1.0f + corrected_angle ) ) * 300000.0f;
use_gainbuf = gain_smoothing.apply( gainbuf, nframes, gain );
for ( unsigned int i = 0; i < audio_input.size(); i++ )
{
/* gain effects */
if ( use_gainbuf )
buffer_apply_gain_buffer( (sample_t*)audio_input[i].buffer(), gainbuf, nframes );
else
buffer_apply_gain( (sample_t*)audio_input[i].buffer(), nframes, gain );
/* frequency effects */
_lowpass[i]->run_lowpass( (sample_t*)audio_input[i].buffer(), cutoff_frequency, nframes );
/* delay effects */
if ( speed_of_sound )
{
if ( use_delaybuf )
_delay[i]->run( (sample_t*)audio_input[i].buffer(), delaybuf, 0, nframes );
else
_delay[i]->run( (sample_t*)audio_input[i].buffer(), 0, delay_seconds, nframes );
}
}
/* now do direct outputs */
if ( audio_input.size() == 1 )
{
_panner->run_mono( (sample_t*)audio_input[0].buffer(),
(sample_t*)audio_output[0].buffer(),
(sample_t*)audio_output[1].buffer(),
(sample_t*)audio_output[2].buffer(),
(sample_t*)audio_output[3].buffer(),
azimuth,
elevation,
nframes );
}
/* gain effects */
if ( unlikely( use_gainbuf ) )
buffer_apply_gain_buffer( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), gainbuf, nframes );
else
{
_panner->run_stereo( (sample_t*)audio_input[0].buffer(),
(sample_t*)audio_input[1].buffer(),
(sample_t*)audio_output[0].buffer(),
(sample_t*)audio_output[1].buffer(),
(sample_t*)audio_output[2].buffer(),
(sample_t*)audio_output[3].buffer(),
azimuth,
buffer_apply_gain( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), nframes, late_gain );
}
float early_angle = azimuth - angle;
if ( early_angle > 180.0f )
early_angle = -180 - ( early_angle - 180 );
else if ( early_angle < -180.0f )
early_angle = 180 - ( early_angle + 180 );
/* send to early reverb */
if ( audio_input.size() == 1 )
{
_early_panner->run_mono( (sample_t*)audio_input[0].buffer(),
(sample_t*)aux_audio_output[1].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[2].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[3].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[4].jack_port()->buffer(nframes),
azimuth + angle,
elevation,
width,
nframes );
}
else
{
_early_panner->run_stereo( (sample_t*)audio_input[0].buffer(),
(sample_t*)audio_input[1].buffer(),
(sample_t*)aux_audio_output[1].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[2].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[3].jack_port()->buffer(nframes),
(sample_t*)aux_audio_output[4].jack_port()->buffer(nframes),
azimuth + angle,
elevation,
width,
nframes );
}
{
use_gainbuf = early_gain_smoothing.apply( gainbuf, nframes, early_gain );
for ( int i = 1; i < 5; i++ )
{
/* gain effects */
if ( unlikely( use_gainbuf ) )
buffer_apply_gain_buffer( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), gainbuf, nframes );
else
buffer_apply_gain( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), nframes, early_gain );
}
}
float corrected_angle = fabs( angle ) - (fabs( width ) * 0.5f);
if ( corrected_angle < 0.0f )
corrected_angle = 0.0f;
float cutoff_frequency = ( 1.0f / ( 1.0f + corrected_angle ) ) * 300000.0f;
use_gainbuf = gain_smoothing.apply( gainbuf, nframes, gain );
for ( unsigned int i = 0; i < audio_input.size(); i++ )
{
/* gain effects */
if ( unlikely( use_gainbuf ) )
buffer_apply_gain_buffer( (sample_t*)audio_input[i].buffer(), gainbuf, nframes );
else
buffer_apply_gain( (sample_t*)audio_input[i].buffer(), nframes, gain );
/* frequency effects */
_lowpass[i]->run_lowpass( (sample_t*)audio_input[i].buffer(), cutoff_frequency, nframes );
/* delay effects */
if ( likely( speed_of_sound ) )
{
if ( unlikely( use_delaybuf ) )
_delay[i]->run( (sample_t*)audio_input[i].buffer(), delaybuf, 0, nframes );
else
_delay[i]->run( (sample_t*)audio_input[i].buffer(), 0, delay_seconds, nframes );
}
}
/* now do direct outputs */
if ( audio_input.size() == 1 )
{
_panner->run_mono( (sample_t*)audio_input[0].buffer(),
(sample_t*)audio_output[0].buffer(),
(sample_t*)audio_output[1].buffer(),
(sample_t*)audio_output[2].buffer(),
(sample_t*)audio_output[3].buffer(),
azimuth,
elevation,
nframes );
}
else
{
_panner->run_stereo( (sample_t*)audio_input[0].buffer(),
(sample_t*)audio_input[1].buffer(),
(sample_t*)audio_output[0].buffer(),
(sample_t*)audio_output[1].buffer(),
(sample_t*)audio_output[2].buffer(),
(sample_t*)audio_output[3].buffer(),
azimuth,
elevation,
width,
nframes );
}
}
void
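The spatializer's distance handling above uses a simple model: direct-sound amplitude falls off as 1/radius (with radius clamped to at least 0.01), and with speed-of-sound simulation enabled, sources beyond 1 m are delayed by the extra travel time at 340.29 m/s. A worked example for a source 5 m away; distance_model() is just an illustration of those two formulas.

#include <stdio.h>

static void
distance_model ( float radius /* metres */, bool speed_of_sound,
                 float *gain, float *delay_seconds )
{
    if ( radius < 0.01f )              /* same "let's not go nuts" clamp as above */
        radius = 0.01f;

    *gain = 1.0f / radius;             /* amplitude falls off as 1/distance */

    *delay_seconds = 0.0f;
    if ( speed_of_sound && radius > 1.0f )
        *delay_seconds = ( radius - 1.0f ) / 340.29f;
}

int
main ( void )
{
    float g, d;

    distance_model( 5.0f, true, &g, &d );
    /* prints: gain 0.200 (about -14 dB), delay 0.0118 s */
    printf( "gain %.3f, delay %.4f s\n", g, d );
    return 0;
}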

View File

@@ -46,17 +46,21 @@ buffer_apply_gain ( sample_t * __restrict__ buf, nframes_t nframes, float g )
{
sample_t * buf_ = (sample_t*) assume_aligned(buf);
if ( g != 1.0f )
while ( nframes-- )
*(buf_++) *= g;
if ( g == 1.0f )
return;
for ( nframes_t i = 0; i < nframes; i++ )
buf_[i] *= g;
}
void
buffer_apply_gain_unaligned ( sample_t * __restrict__ buf, nframes_t nframes, float g )
{
if ( g != 1.0f )
while ( nframes-- )
*(buf++) *= g;
if ( g == 1.0f )
return;
for ( nframes_t i = 0; i < nframes; i++ )
buf[i] *= g;
}
void
@@ -65,8 +69,8 @@ buffer_apply_gain_buffer ( sample_t * __restrict__ buf, const sample_t * __restr
sample_t * buf_ = (sample_t*) assume_aligned(buf);
const sample_t * gainbuf_ = (const sample_t*) assume_aligned(gainbuf);
while ( nframes-- )
*(buf_++) *= *(gainbuf_++);
for ( nframes_t i = 0; i < nframes; i++ )
buf_[i] *= gainbuf_[i];
}
void
@@ -76,8 +80,8 @@ buffer_copy_and_apply_gain_buffer ( sample_t * __restrict__ dst, const sample_t
const sample_t * src_ = (const sample_t*) assume_aligned(src);
const sample_t * gainbuf_ = (const sample_t*) assume_aligned(gainbuf);
while ( nframes-- )
*(dst_++) = *(src_++) * *(gainbuf_++);
for ( nframes_t i = 0; i < nframes; i++ )
dst_[i] = src_[i] * gainbuf_[i];
}
void
@@ -86,8 +90,8 @@ buffer_mix ( sample_t * __restrict__ dst, const sample_t * __restrict__ src, nfr
sample_t * dst_ = (sample_t*) assume_aligned(dst);
const sample_t * src_ = (const sample_t*) assume_aligned(src);
while ( nframes-- )
*(dst_++) += *(src_++);
for ( nframes_t i = 0; i < nframes; i++ )
dst_[i] += src_[i];
}
void
@@ -96,8 +100,8 @@ buffer_mix_with_gain ( sample_t * __restrict__ dst, const sample_t * __restrict_
sample_t * dst_ = (sample_t*) assume_aligned(dst);
const sample_t * src_ = (const sample_t*) assume_aligned(src);
while ( nframes-- )
*(dst_++) += *(src_++) * g;
for ( nframes_t i = 0; i < nframes; i++ )
dst_[i] += src_[i] * g;
}
void
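The buffer helpers above trade the old pointer-walking while ( nframes-- ) loops for counted, index-based loops over __restrict__ (and assume_aligned) pointers. A counted loop with a single induction variable and provably non-overlapping buffers is the shape GCC's auto-vectorizer recognizes most readily. The sketch below shows both shapes on a generic mix loop; it is illustrative only, not project code.

typedef float sample_t;
typedef unsigned nframes_t;

/* old shape: pointer updates carried across iterations */
static void
mix_ptr ( sample_t *dst, const sample_t *src, nframes_t nframes )
{
    while ( nframes-- )
        *(dst++) += *(src++);
}

/* new shape: counted loop; __restrict__ promises the buffers don't overlap,
 * so every iteration is independent and can be handled in SIMD lanes */
static void
mix_indexed ( sample_t * __restrict__ dst, const sample_t * __restrict__ src,
              nframes_t nframes )
{
    for ( nframes_t i = 0; i < nframes; i++ )
        dst[i] += src[i];
}

int
main ( void )
{
    sample_t a[4] = { 0, 0, 0, 0 };
    const sample_t b[4] = { 1, 2, 3, 4 };

    mix_ptr( a, b, 4 );
    mix_indexed( a, b, 4 );   /* a is now { 2, 4, 6, 8 } */
    return 0;
}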
@@ -181,8 +185,10 @@ buffer_is_digital_black ( sample_t *buf, nframes_t nframes )
{
while ( nframes-- )
{
if ( 0 != buf[nframes] )
return false;
if (! *(buf++) )
continue;
return false;
}
return true;
@@ -193,15 +199,19 @@ buffer_get_peak ( const sample_t * __restrict__ buf, nframes_t nframes )
{
const sample_t * buf_ = (const sample_t*) assume_aligned(buf);
float p = 0.0f;
float pmax = 0.0f;
float pmin = 0.0f;
while ( nframes-- )
for ( nframes_t i = 0; i < nframes; i++ )
{
const float s = fabs(*(buf_++));
p = s > p ? s : p;
pmax = buf_[i] > pmax ? buf_[i] : pmax;
pmin = buf_[i] < pmin ? buf_[i] : pmin;
}
return p;
pmax = fabsf(pmax);
pmin = fabsf(pmin);
return pmax > pmin ? pmax : pmin;
}
void
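buffer_get_peak() now keeps a running maximum and minimum and folds the sign out once at the end instead of calling fabs() on every sample; the two compare-selects map to simple min/max operations that vectorize well. A self-contained version of the same idea.

#include <math.h>
#include <stdio.h>

typedef float sample_t;
typedef unsigned nframes_t;

static float
peak ( const sample_t *buf, nframes_t nframes )
{
    float pmax = 0.0f;
    float pmin = 0.0f;

    for ( nframes_t i = 0; i < nframes; i++ )
    {
        pmax = buf[i] > pmax ? buf[i] : pmax;   /* largest positive excursion */
        pmin = buf[i] < pmin ? buf[i] : pmin;   /* largest negative excursion */
    }

    pmax = fabsf( pmax );
    pmin = fabsf( pmin );
    return pmax > pmin ? pmax : pmin;
}

int
main ( void )
{
    const sample_t buf[4] = { 0.1f, -0.8f, 0.5f, -0.2f };

    printf( "peak = %.1f\n", peak( buf, 4 ) );   /* 0.8, from the -0.8 sample */
    return 0;
}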

View File

@@ -79,3 +79,10 @@ static inline float interpolate_cubic ( const float fr, const float inm1, const
#define DEG2RAD 0.01745329251f
#define ONEOVERSQRT2 0.70710678118f
#ifndef likely
#define likely(x) __builtin_expect(x,1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(x,0)
#endif
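These hints expand to GCC's __builtin_expect(), which tells the compiler which way a branch usually goes so it can lay out the expected path as straight-line code. Because the macros pass the expression through unchanged, callers should hand them a 0/1 condition (the common kernel-style variant normalizes with !!(x)). A usage sketch; the denormal check is an invented example, not code from this commit.

#include <math.h>

#ifndef unlikely
#define unlikely(x) __builtin_expect(x,0)
#endif

/* In a healthy signal nearly every sample is a normal number, so the
 * flush-to-zero branch is marked as the cold path. */
static inline float
sanitize ( float s )
{
    if ( unlikely( s != 0.0f && fabsf( s ) < 1e-30f ) )
        return 0.0f;

    return s;
}

int
main ( void )
{
    return sanitize( 0.5f ) == 0.5f ? 0 : 1;
}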