New video export #51

Merged · 3 commits · Nov 11, 2020
15 changes: 7 additions & 8 deletions core/src/main/kotlin/dev/matsem/astral/core/di/CoreModule.kt
@@ -4,7 +4,6 @@ import com.hamoid.VideoExport
import ddf.minim.AudioOutput
import ddf.minim.Minim
import ddf.minim.ugens.Sink
import dev.matsem.astral.core.VideoExportConfig
import dev.matsem.astral.core.tools.audio.AudioProcessor
import dev.matsem.astral.core.tools.audio.beatcounter.BeatCounter
import dev.matsem.astral.core.tools.galaxy.Galaxy
@@ -16,6 +15,8 @@ import dev.matsem.astral.core.tools.midi.MidiRecorder
import dev.matsem.astral.core.tools.osc.OscManager
import dev.matsem.astral.core.tools.pixelsort.PixelSorter
import dev.matsem.astral.core.tools.shapes.ExtrusionCache
import dev.matsem.astral.core.tools.videoexport.FFTSerializer
import dev.matsem.astral.core.tools.videoexport.VideoExporter
import org.jbox2d.common.Vec2
import org.koin.dsl.module
import processing.core.PApplet
@@ -36,7 +37,7 @@ val coreModule = module {
single { (get() as Minim).lineOut }
single { (get() as Minim).lineIn }
single { Sink().apply { patch(get() as AudioOutput) } }
single { AudioProcessor(get(), VideoExportConfig.IS_IN_RENDER_MODE) }
single { AudioProcessor(get()) }
factory { BeatCounter(get(), get()) }

// Extrusion
@@ -46,12 +47,10 @@ val coreModule = module {
// Effects
single { PixelSorter() }

single {
VideoExport(get()).apply {
setFrameRate(VideoExportConfig.MOVIE_FPS)
setAudioFileName(VideoExportConfig.AUDIO_FILE_PATH)
}
}
// VideoExporter
single { VideoExport(get()) }
single { FFTSerializer(get(), get()) }
factory { VideoExporter(get(), get(), get(), get()) }

factory {
Box2DProcessing(get()).apply {
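
To see how these bindings fit together, here is a hypothetical consumer, not part of this PR: it assumes Koin 2.x-style component injection, and the sketch class name is made up, purely to illustrate that VideoExporter is a factory (fresh instance per injection) while AudioProcessor, VideoExport and FFTSerializer stay singletons.

// Hypothetical consumer sketch, not part of this PR; names are illustrative only.
import org.koin.core.KoinComponent
import org.koin.core.inject
import dev.matsem.astral.core.tools.audio.AudioProcessor
import dev.matsem.astral.core.tools.videoexport.VideoExporter

class ExportingSketch : KoinComponent {

    // factory {} above: each injection site gets its own VideoExporter,
    // single {}: AudioProcessor (and Minim, VideoExport, FFTSerializer) are shared.
    private val videoExporter: VideoExporter by inject()
    private val audioProcessor: AudioProcessor by inject()
}
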
core/src/main/kotlin/dev/matsem/astral/core/tools/audio/AudioProcessor.kt
@@ -5,11 +5,29 @@ import ddf.minim.AudioListener
import ddf.minim.analysis.BeatDetect
import ddf.minim.analysis.FFT

class AudioProcessor constructor(
private val lineIn: AudioInput,
private val isInRenderMode: Boolean
) : AudioListener {
/**
* This class handles everything related to live audio input.
* Use it to get current FFT values or BeatDetect data.
*/
class AudioProcessor constructor(private val lineIn: AudioInput) : AudioListener {

/**
* Operation mode. Affects behavior of [beatDetectData], [getRange] and [getFftAvg] methods.
* [Mode.LIVE] uses live AudioInput as source for mentioned methods.
* [Mode.MOCK] uses mocked FFT samples and BeatDetect data as source for mentioned methods. Pass the mocked data
* using the [mockFft] and [mockBeatDetect] methods.
*/
enum class Mode {
LIVE, MOCK
}

companion object {
const val FFT_BANDWIDTH = 22
const val FFT_OCTAVES = 3
const val BEAT_DETECT_SENTIVITY = 150
}

private var mode: Mode = Mode.LIVE
private var mockLeft = arrayListOf<Float>()
private var mockRight = arrayListOf<Float>()
private var beatDetectMock = BeatDetectData(false, false, false)
@@ -25,63 +43,104 @@ class AudioProcessor constructor(
}

init {
if (!isInRenderMode) {
lineIn.addListener(this)
setMode(Mode.LIVE)
}

/**
* Sets operation mode. Refer to [Mode] enum documentation.
*/
fun setMode(mode: Mode) {
this.mode = mode

when (mode) {
Mode.LIVE -> {
lineIn.addListener(this)
}
Mode.MOCK -> {
// Listener needs to be removed for MOCK mode,
// so the live audio input does not affect the exported video
lineIn.removeListener(this)
}
}
}

/**
* Software gain which affects FFT values.
*/
var gain = 1f

/**
* FFT object used for live audio input.
*/
val fft = FFT(lineIn.bufferSize(), lineIn.sampleRate()).apply {
logAverages(22, 3)
logAverages(FFT_BANDWIDTH, FFT_OCTAVES)
}

/**
* BeatDetect object used for live audio input.
*/
val beatDetect = BeatDetect(lineIn.bufferSize(), lineIn.sampleRate()).apply {
setSensitivity(150)
setSensitivity(BEAT_DETECT_SENTIVITY)
}

/**
* BeatDetect data which takes the [Mode] setting into account.
*/
val beatDetectData: BeatDetectData
get() = if (isInRenderMode) {
beatDetectMock
} else {
BeatDetectData(
get() = when (mode) {
Mode.LIVE -> BeatDetectData(
beatDetect.isKick,
beatDetect.isSnare,
beatDetect.isHat
)
Mode.MOCK -> beatDetectMock
}

/**
* Calculates average amplitude of FFT samples in given frequency [range].
*/
fun getRange(range: ClosedFloatingPointRange<Float>): Float {
if (isInRenderMode) {
val values = mutableListOf<Float>()
for (i in 0 until mockLeft.size) {
if (range.contains(fft.getAverageCenterFrequency(i))) {
values.add((mockLeft[i] + mockRight[i]) / 2f)
return when(mode) {
Mode.LIVE -> fft.calcAvg(range.start, range.endInclusive) * gain
Mode.MOCK -> {
val values = mutableListOf<Float>()
for (i in 0 until mockLeft.size) {
if (range.contains(fft.getAverageCenterFrequency(i))) {
values.add((mockLeft[i] + mockRight[i]) / 2f)
}
}
}

return values.average().toFloat() * gain
} else {
return fft.calcAvg(range.start, range.endInclusive) * gain
values.average().toFloat() * gain
}
}
}

/**
* Returns FFT average of band under index [i]. [i] should be in range 0 to [FFT_BANDWIDTH].
*/
fun getFftAvg(i: Int): Float {
return if (isInRenderMode) {
val l = mockLeft[i]
val r = mockRight[i]
(l + r) / 2f
} else {
fft.getAvg(i) * gain
return when(mode) {
Mode.LIVE -> fft.getAvg(i) * gain
Mode.MOCK -> {
val l = mockLeft[i]
val r = mockRight[i]
(l + r) / 2f
}
}
}

/**
* Provides mock FFT data for current frame while exporting video
*/
fun mockFft(left: List<Float>, right: List<Float>) {
mockLeft = ArrayList(left)
mockRight = ArrayList(right)
beatDetect.detect(left.toFloatArray())
}

/**
* Provides mock BeatDetect data for current frame while exporting video
*/
fun mockBeatDetect(data: BeatDetectData) {
beatDetectMock = data
}
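
For context, here is a minimal sketch, not from this PR, of the intended MOCK-mode call sequence. It assumes BeatDetectData lives in the same package and that the FFT averages come from a pre-analyzed source such as the file produced by FFTSerializer below; the helper name and the frequency range are made up.

import ddf.minim.AudioInput
import dev.matsem.astral.core.tools.audio.AudioProcessor
import dev.matsem.astral.core.tools.audio.BeatDetectData

// Hypothetical helper, not part of the PR: drives AudioProcessor with mocked data.
fun renderFrameWithMockedAudio(lineIn: AudioInput, left: List<Float>, right: List<Float>) {
    val audioProcessor = AudioProcessor(lineIn)

    // Detach from the live line-in so exported frames are deterministic.
    audioProcessor.setMode(AudioProcessor.Mode.MOCK)

    // Per exported frame: feed pre-analyzed FFT averages and beat flags...
    audioProcessor.mockFft(left = left, right = right)
    audioProcessor.mockBeatDetect(BeatDetectData(true, false, false)) // kick, snare, hat

    // ...and read them back through the same API the live path uses.
    val bassAvg = audioProcessor.getRange(20f..60f)
    println("bass avg: $bassAvg, beat: ${audioProcessor.beatDetectData}")
}
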
core/src/main/kotlin/dev/matsem/astral/core/tools/videoexport/FFTSerializer.kt
@@ -0,0 +1,95 @@
package dev.matsem.astral.core.tools.videoexport

import ddf.minim.AudioSample
import ddf.minim.Minim
import ddf.minim.analysis.BeatDetect
import ddf.minim.analysis.FFT
import dev.matsem.astral.core.tools.audio.AudioProcessor
import processing.core.PApplet
import java.io.PrintWriter

/**
* This class generates FFT and BeatDetect analysis of a music file using the Minim library and then serializes this info
* to a text file for later use by [VideoExporter] to mock the audio samples into the exported movie.
*/
class FFTSerializer(private val parent: PApplet, private val minim: Minim) {

companion object {
/**
* Text separator used for FFT sample (de)serialization.
*/
const val SEP = "|"
}

/**
* Serializes FFT and BeatDetect analysis of the provided [audioFilePath] audio file into a text file at the same location
* as the [audioFilePath].
*/
fun serialize(audioFilePath: String) {
val output: PrintWriter = parent.createWriter(parent.dataPath("$audioFilePath.txt"))
val track = minim.loadSample(audioFilePath, 2048)

val fftSize = 1024
val sampleRate = track.sampleRate()

val beatDetect = BeatDetect(fftSize, sampleRate)

val fftSamplesL = FloatArray(fftSize)
val fftSamplesR = FloatArray(fftSize)

val samplesL = track.getChannel(AudioSample.LEFT)
val samplesR = track.getChannel(AudioSample.RIGHT)

val fftL = FFT(fftSize, sampleRate)
val fftR = FFT(fftSize, sampleRate)

fftL.logAverages(AudioProcessor.FFT_BANDWIDTH, AudioProcessor.FFT_OCTAVES)
fftR.logAverages(AudioProcessor.FFT_BANDWIDTH, AudioProcessor.FFT_OCTAVES)

val totalChunks = samplesL.size / fftSize + 1
val fftSlices = fftL.avgSize()

for (ci in 0 until totalChunks) {
val chunkStartIndex = ci * fftSize
val chunkSize = PApplet.min(samplesL.size - chunkStartIndex, fftSize)

System.arraycopy(samplesL, chunkStartIndex, fftSamplesL, 0, chunkSize)
System.arraycopy(samplesR, chunkStartIndex, fftSamplesR, 0, chunkSize)
if (chunkSize < fftSize) {
// Zero-pad the tail of the final, partial chunk (Arrays.fill's toIndex is exclusive)
java.util.Arrays.fill(fftSamplesL, chunkSize, fftSamplesL.size, 0.0f)
java.util.Arrays.fill(fftSamplesR, chunkSize, fftSamplesR.size, 0.0f)
}

fftL.forward(fftSamplesL)
fftR.forward(fftSamplesR)
beatDetect.detect(fftSamplesL)

// The format of the saved txt file.
// The file contains many rows. Each row looks like this:
// T|B|L|R|L|R|L|R|... etc
// where T is the time in seconds and B is BeatDetect data
// Then we alternate left and right channel FFT values
// The first L and R values in each row are low frequencies (bass)
// and they go towards high frequency as we advance towards
// the end of the line.
val msg = StringBuilder(PApplet.nf(chunkStartIndex / sampleRate, 0, 3).replace(',', '.'))
val beat = when {
beatDetect.isKick -> 1
beatDetect.isSnare -> 2
beatDetect.isHat -> 3
else -> 0
}

msg.append(SEP + beat.toString())
for (i in 0 until fftSlices) {
msg.append(SEP + PApplet.nf(fftL.getAvg(i), 0, 4).replace(',', '.'))
msg.append(SEP + PApplet.nf(fftR.getAvg(i), 0, 4).replace(',', '.'))
}
output.println(msg.toString())
}

track.close()
output.flush()
output.close()
}
}
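
The row format described above is straightforward to read back. A hypothetical parser, not part of this PR (the FftFrame name and parseFftLine helper are made up), could recover one frame per line like this:

import dev.matsem.astral.core.tools.videoexport.FFTSerializer

// Hypothetical reader for the "T|B|L|R|L|R|..." rows produced by FFTSerializer.
data class FftFrame(val timeSec: Float, val beat: Int, val left: List<Float>, val right: List<Float>)

fun parseFftLine(line: String): FftFrame {
    val parts = line.split(FFTSerializer.SEP)
    val timeSec = parts[0].toFloat()
    val beat = parts[1].toInt() // 0 = none, 1 = kick, 2 = snare, 3 = hat
    val bands = parts.drop(2).map { it.toFloat() }
    return FftFrame(
        timeSec = timeSec,
        beat = beat,
        left = bands.filterIndexed { i, _ -> i % 2 == 0 },  // even slots: left channel, low to high bands
        right = bands.filterIndexed { i, _ -> i % 2 == 1 }  // odd slots: right channel, low to high bands
    )
}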