Fix cuda compilation and output display bugs (#103)
MathieuFavreau authored and alexmo16 committed Oct 30, 2019
1 parent 702d572 commit d70c94e
Showing 13 changed files with 216 additions and 137 deletions.
112 changes: 78 additions & 34 deletions c_plus_plus/src/model/stream/media_thread.cpp
@@ -1,10 +1,12 @@
#include "media_thread.h"

#include <cstring>
#include <iostream>

#include "model/audio_suppresser/audio_suppresser.h"
#include "model/classifier/classifier.h"
#include "model/stream/utils/alloc/heap_object_factory.h"
#include "model/stream/utils/models/circular_buffer.h"
#include "model/stream/utils/models/point.h"
#include "model/stream/video/dewarping/dewarping_helper.h"
#include "model/stream/video/video_stabilizer.h"
@@ -50,6 +52,20 @@ MediaThread::MediaThread(std::unique_ptr<IAudioSource> audioSource, std::unique_

void MediaThread::run()
{
// Utility objects
HeapObjectFactory heapObjectFactory;
DisplayImageBuilder displayImageBuilder(videoOutputConfig_.resolution);
VideoStabilizer videoStabilizer(videoInputConfig_.fpsTarget);

// Display images
Image emptyDisplay(videoOutputConfig_.resolution, videoOutputConfig_.imageFormat);
CircularBuffer<Image> displayBuffers(2, Image(videoOutputConfig_.resolution, videoOutputConfig_.imageFormat));

// Virtual camera images
const Dim2<int>& maxVcDim = displayImageBuilder.getMaxVirtualCameraDim();
std::vector<Image> vcImages(1, RGBImage(maxVcDim.width, maxVcDim.height));
std::vector<Image> vcOutputFormatImages(1, Image(maxVcDim.width, maxVcDim.height, videoOutputConfig_.imageFormat));

// TODO: config?
const int classifierRangeThreshold = 2;

@@ -58,93 +74,115 @@ void MediaThread::run()

try
{
audioSource_->open();
audioSink_->open();
positionSource_->open();
// Allocate display images
heapObjectFactory.allocateObject(emptyDisplay);
heapObjectFactory.allocateObjectCircularBuffer(displayBuffers);

HeapObjectFactory heapObjectFactory;
DualBuffer<Image> displayBuffers(Image(videoOutputConfig_.resolution, videoOutputConfig_.imageFormat));
DisplayImageBuilder displayImageBuilder(videoOutputConfig_.resolution);
// Allocate virtual camera images
objectFactory_->allocateObjectVector(vcImages);
objectFactory_->allocateObjectVector(vcOutputFormatImages);

heapObjectFactory.allocateObjectDualBuffer(displayBuffers);
displayImageBuilder.setDisplayImageColor(displayBuffers.getCurrent());
displayImageBuilder.setDisplayImageColor(displayBuffers.getInUse());
// Set background color of empty display
displayImageBuilder.setDisplayImageColor(emptyDisplay);

Dim2<int> maxVcDim = displayImageBuilder.getMaxVirtualCameraDim();
std::vector<Image> vcImages(5, RGBImage(maxVcDim.width, maxVcDim.height));
objectFactory_->allocateObjectVector(vcImages);
// Start audio resources
audioSource_->open();
audioSink_->open();
positionSource_->open();

Point<float> fisheyeCenter(videoInputConfig_.resolution.width / 2.f, videoInputConfig_.resolution.height / 2.f);
std::vector<SphericalAngleRect> detections;

VideoStabilizer videoStabilizer(videoInputConfig_.fpsTarget);

// Media loop start

std::cout << "MediaThread loop started" << std::endl;

while (!isAbortRequested())
{
videoStabilizer.startFrame();

// Try to get queued detections
if (detectionQueue_->try_dequeue(detections))
{
virtualCameraManager_->updateVirtualCamerasGoal(detections);
}

// Update the position and size of virtual cameras
virtualCameraManager_->updateVirtualCameras(videoStabilizer.getLastFrameTimeMs());

// Read audio source and positions
int audioBytesRead = audioSource_->read(audioBuffer, sizeof(audioBuffer));
std::vector<SourcePosition> sourcePositions = positionSource_->getPositions();

// Read image from video input and convert it to rgb format for dewarping
const Image& rawImage = videoInput_->readImage();
const Image& rgbImage = imageBuffer_->getCurrent();
imageConverter_->convert(rawImage, rgbImage);
imageBuffer_->swap();

// Get the active virtual cameras
const std::vector<VirtualCamera> virtualCameras = virtualCameraManager_->getVirtualCameras();
int vcCount = (int)virtualCameras.size();
int vcCount = static_cast<int>(virtualCameras.size());

// If there are active virtual cameras, dewarp an image for each one and combine them into an output image
if (vcCount > 0)
{
// This should not happen often in theory; it's only needed if a large number of virtual cameras is required
// Dynamically allocate more virtual camera images
for (int i = vcImages.size(); i < vcCount; ++i)
{
RGBImage vcImage(maxVcDim.width, maxVcDim.height);
objectFactory_->allocateObject(vcImage);
vcImages.push_back(vcImage);

Image vcOutputFormatImage(maxVcDim.width, maxVcDim.height, videoOutputConfig_.imageFormat);
objectFactory_->allocateObject(vcOutputFormatImage);
vcOutputFormatImages.push_back(vcOutputFormatImage);
}

// Get the size of the virtual camera images to dewarp (this prevents a resize when building the output image)
Dim2<int> resizeDim(displayImageBuilder.getVirtualCameraDim(vcCount));
std::vector<Image> vcResizeImages(vcCount, RGBImage(resizeDim));
std::vector<Image> vcResizeOutputFormatImages(vcCount,
Image(resizeDim, videoOutputConfig_.imageFormat));

// Virtual camera dewarping loop
for (int i = 0; i < vcCount; ++i)
{
const VirtualCamera& virtualCamera = virtualCameras[i];
// Use the same buffers as vcImages for the smaller dewarped images
Image& vcResizeImage = vcResizeImages[i];
vcResizeImage.hostData = vcImages[i].hostData;
vcResizeImage.deviceData = vcImages[i].deviceData;

// Dewarping of virtual camera
const VirtualCamera& virtualCamera = virtualCameras[i];
DewarpingParameters vcParams =
getDewarpingParametersFromAngleBoundingBox(virtualCamera, fisheyeCenter, dewarpingConfig_);
dewarper_->dewarpImageFiltered(rgbImage, vcResizeImage, vcParams);

// Ok, this is a hack until we work with raw data for dewarping of virtual cameras (converts the
// image onto itself, only works from RGB)
Image inImage = vcResizeImage;
vcResizeImages[i] = Image(inImage.width, inImage.height, videoOutputConfig_.imageFormat);
vcResizeImages[i].deviceData = inImage.deviceData;
vcResizeImages[i].hostData = inImage.hostData;
imageConverter_->convert(inImage, vcResizeImage);
// Use the same buffers as vcOutputFormatImages for the smaller dewarped (and converted) images
Image& vcResizeOutputFormatImage = vcResizeOutputFormatImages[i];
vcResizeOutputFormatImage.hostData = vcOutputFormatImages[i].hostData;
vcResizeOutputFormatImage.deviceData = vcOutputFormatImages[i].deviceData;

// Conversion from rgb to output format
imageConverter_->convert(vcResizeImage, vcResizeOutputFormatImage);
}

const Image& displayImage = displayBuffers.getCurrent();
displayImageBuilder.clearVirtualCamerasOnDisplayImage(displayImage);
// Clear the image before writing to it
const Image& displayImage = displayBuffers.current();
std::memcpy(displayImage.hostData, emptyDisplay.hostData, displayImage.size);

// Wait for dewarping to be completed
synchronizer_->sync();
displayImageBuilder.createDisplayImage(vcResizeImages, displayImage);

// Write to output image and send it to the video output
displayImageBuilder.createDisplayImage(vcResizeOutputFormatImages, displayImage);
videoOutput_->writeImage(displayImage);
displayBuffers.swap();
displayBuffers.next();
}
else
{
// If there are no active virtual cameras, just send an empty image
videoOutput_->writeImage(emptyDisplay);
}

if (audioBytesRead > 0)
@@ -166,23 +204,29 @@

detections.clear();

// If the frame took less than 1/fps, this call blocks so the frame time matches 1/fps
videoStabilizer.endFrame();
}

heapObjectFactory.deallocateObjectDualBuffer(displayBuffers);
objectFactory_->deallocateObjectVector(vcImages);
}
catch (const std::exception& e)
{
std::cout << e.what() << std::endl;
std::cout << "Error in video thread : " << e.what() << std::endl;
}

// Close audio resources
audioSource_->close();
audioSink_->close();
positionSource_->close();

delete[] audioBuffer;

// Deallocate display images
heapObjectFactory.deallocateObject(emptyDisplay);
heapObjectFactory.deallocateObjectCircularBuffer(displayBuffers);

// Deallocate virtual camera images
objectFactory_->deallocateObjectVector(vcImages);
objectFactory_->deallocateObjectVector(vcOutputFormatImages);

std::cout << "MediaThread loop finished" << std::endl;
}
} // namespace Model
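
The fix to the dewarping loop above hinges on buffer aliasing: the per-frame vcResizeImages and vcResizeOutputFormatImages no longer own memory but point at the hostData/deviceData of the preallocated vcImages/vcOutputFormatImages, so the loop presumably avoids per-frame host and CUDA allocations. A minimal sketch of that pattern, using a simplified hypothetical Image struct rather than the project's real type:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical, simplified stand-in for the project's Image type.
struct Image
{
    int width = 0;
    int height = 0;
    std::size_t size = 0;                // bytes
    unsigned char* hostData = nullptr;   // CPU-side buffer
    unsigned char* deviceData = nullptr; // GPU-side buffer
};

// Each frame, the smaller working images borrow the preallocated buffers
// instead of allocating fresh host/device memory per frame.
void aliasWorkingImages(const std::vector<Image>& preallocated, std::vector<Image>& working)
{
    for (std::size_t i = 0; i < working.size(); ++i)
    {
        working[i].hostData = preallocated[i].hostData;
        working[i].deviceData = preallocated[i].deviceData;
    }
}
```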
22 changes: 10 additions & 12 deletions c_plus_plus/src/model/stream/stream.cpp
@@ -28,8 +28,8 @@ Stream::Stream(const VideoConfig& videoInputConfig, const VideoConfig& videoOutp
, dewarpingConfig_(dewarpingConfig)
, mediaThread_(nullptr)
, detectionThread_(nullptr)
, implementationFactory_(false)
{
bool useZeroCopyIfSupported = false;
int detectionDewarpingCount = 4;
float aspectRatio = 3.f / 4.f;
float minElevation = math::deg2rad(0.f);
@@ -57,24 +57,22 @@ Stream::Stream(const VideoConfig& videoInputConfig, const VideoConfig& videoOutp
std::string weightsFile = root + "/config/yolo/weights/yolov3-tiny.weights";
std::string metaFile = root + "/config/yolo/cfg/coco.data";

ImplementationFactory implementationFactory(useZeroCopyIfSupported);

objectFactory_ = implementationFactory.getDetectionObjectFactory();
objectFactory_ = implementationFactory_.getDetectionObjectFactory();
objectFactory_->allocateObjectLockTripleBuffer(*imageBuffer_);

detectionThread_ = std::make_unique<DetectionThread>(
imageBuffer_, implementationFactory.getDetector(configFile, weightsFile, metaFile), detectionQueue,
implementationFactory.getDetectionFisheyeDewarper(aspectRatio),
implementationFactory.getDetectionObjectFactory(), implementationFactory.getDetectionSynchronizer(),
imageBuffer_, implementationFactory_.getDetector(configFile, weightsFile, metaFile), detectionQueue,
implementationFactory_.getDetectionFisheyeDewarper(aspectRatio),
implementationFactory_.getDetectionObjectFactory(), implementationFactory_.getDetectionSynchronizer(),
dewarpingConfig_, detectionDewarpingCount);

mediaThread_ = std::make_unique<MediaThread>(
std::make_unique<OdasAudioSource>(10030), std::make_unique<PulseAudioSink>(audioOutputConfig_),
std::make_unique<OdasPositionSource>(10020), implementationFactory.getCameraReader(videoInputConfig_),
implementationFactory.getFisheyeDewarper(), implementationFactory.getObjectFactory(),
std::make_unique<VirtualCameraOutput>(videoOutputConfig_), implementationFactory.getSynchronizer(),
std::make_unique<OdasPositionSource>(10020), implementationFactory_.getCameraReader(videoInputConfig_),
implementationFactory_.getFisheyeDewarper(), implementationFactory_.getObjectFactory(),
std::make_unique<VirtualCameraOutput>(videoOutputConfig_), implementationFactory_.getSynchronizer(),
std::make_unique<VirtualCameraManager>(aspectRatio, minElevation, maxElevation), detectionQueue, imageBuffer_,
implementationFactory.getImageConverter(), dewarpingConfig_, videoInputConfig_, videoOutputConfig_,
implementationFactory_.getImageConverter(), dewarpingConfig_, videoInputConfig_, videoOutputConfig_,
audioInputConfig_, audioOutputConfig_);
}

@@ -97,4 +95,4 @@ void Stream::stop()
mediaThread_->join();
}

} // namespace Model
} // namespace Model
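
For context on the stream.cpp change: the ImplementationFactory was a constructor-local and is now the implementationFactory_ member added to stream.h, presumably so that it outlives the objects it hands out (e.g. a CUDA-backed implementation the detector and dewarpers reference). A hedged sketch of the difference (all types hypothetical):

```cpp
#include <memory>

struct Product { /* may reference state owned by the factory */ };

struct Factory
{
    std::unique_ptr<Product> make() const { return std::make_unique<Product>(); }
    // ~Factory() could tear down state (e.g. a CUDA module) that Products use.
};

class StreamBefore
{
public:
    StreamBefore()
    {
        Factory factory;           // local: destroyed when the constructor returns
        product_ = factory.make(); // product_ may then reference dead state
    }

private:
    std::unique_ptr<Product> product_;
};

class StreamAfter
{
public:
    StreamAfter() : product_(factory_.make()) {}

private:
    Factory factory_;              // member: lives as long as the stream itself
    std::unique_ptr<Product> product_;
};
```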
2 changes: 2 additions & 0 deletions c_plus_plus/src/model/stream/stream.h
@@ -9,6 +9,7 @@
#include "model/stream/utils/alloc/i_object_factory.h"
#include "model/stream/video/detection/detection_thread.h"
#include "model/stream/video/dewarping/models/dewarping_config.h"
#include "model/stream/video/impl/implementation_factory.h"
#include "model/stream/video/video_config.h"

namespace Model
@@ -35,6 +36,7 @@ class Stream : public IStream
std::unique_ptr<DetectionThread> detectionThread_;
std::unique_ptr<IObjectFactory> objectFactory_;
std::shared_ptr<LockTripleBuffer<Image>> imageBuffer_;
ImplementationFactory implementationFactory_;
};

} // namespace Model
21 changes: 21 additions & 0 deletions c_plus_plus/src/model/stream/utils/alloc/i_object_factory.h
@@ -4,6 +4,7 @@
#include <vector>

#include "model/stream/utils/images/images.h"
#include "model/stream/utils/models/circular_buffer.h"
#include "model/stream/utils/models/dual_buffer.h"
#include "model/stream/utils/threads/lock_triple_buffer.h"
#include "model/stream/video/dewarping/models/dewarping_mapping.h"
@@ -37,6 +38,26 @@ class IObjectFactory
deallocateObject(buffer.getInUse());
}

template <typename T>
void allocateObjectCircularBuffer(CircularBuffer<T>& buffer) const
{
for (std::size_t i = 0; i < buffer.size(); ++i)
{
allocateObject(buffer.current());
buffer.next();
}
}

template <typename T>
void deallocateObjectCircularBuffer(CircularBuffer<T>& buffer) const
{
for (std::size_t i = 0; i < buffer.size(); ++i)
{
deallocateObject(buffer.current());
buffer.next();
}
}

template <typename T>
void allocateObjectLockTripleBuffer(LockTripleBuffer<T>& buffer) const
{
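
Both new helpers rely on the same invariant: a pass of size() current()/next() steps visits every slot exactly once and, because the buffer wraps, leaves the index back where it started, so allocation and deallocation touch identical slots. A self-contained sketch of that invariant (Ring is a hypothetical stand-in, not the project's CircularBuffer):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical three-slot ring mimicking CircularBuffer's interface.
struct Ring
{
    std::vector<int> slots{0, 0, 0};
    std::size_t index = 0;
    std::size_t size() const { return slots.size(); }
    int& current() { return slots[index]; }
    void next() { index = (index + 1) % slots.size(); }
};

int main()
{
    Ring ring;
    for (std::size_t i = 0; i < ring.size(); ++i)
    {
        ring.current() = 1; // stands in for allocateObject(buffer.current())
        ring.next();
    }
    // Every slot was touched once and the index wrapped back to 0.
    std::cout << "index after full pass: " << ring.index << '\n';
    return 0;
}
```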
15 changes: 3 additions & 12 deletions c_plus_plus/src/model/stream/utils/models/circular_buffer.h
@@ -1,7 +1,7 @@
#ifndef CIRCULAR_BUFFER_H
#define CIRCULAR_BUFFER_H

#include <memory>
#include <vector>

namespace Model
{
@@ -11,25 +11,16 @@ class CircularBuffer
public:
explicit CircularBuffer(std::size_t size, const T& value = T())
: size_(size)
, buffers_(std::make_unique<T[]>(size))
, buffers_(size, value)
, index_(0)
{
for (std::size_t i = 0; i < size_; ++i)
{
buffers_[i] = value;
}
}

std::size_t size()
{
return size_;
}

const std::unique_ptr<T[]>& buffers()
{
return buffers_;
}

T& current()
{
return buffers_[index_];
@@ -42,7 +33,7 @@

private:
std::size_t size_;
std::unique_ptr<T[]> buffers_;
std::vector<T> buffers_;
int index_;
};

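Instantiated with two slots, the rewritten class behaves like the DualBuffer it replaces in MediaThread: current() is the active display image and next() flips to the other slot. A minimal usage sketch, assuming next() advances the index modulo size() (its body is elided above), with a trimmed copy of the class so the example stands alone:

```cpp
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Trimmed sketch of the class above; next() is assumed to wrap modulo size().
template <typename T>
class CircularBuffer
{
public:
    explicit CircularBuffer(std::size_t size, const T& value = T())
        : buffers_(size, value)
        , index_(0)
    {
    }

    std::size_t size() { return buffers_.size(); }
    T& current() { return buffers_[index_]; }
    void next() { index_ = (index_ + 1) % buffers_.size(); }

private:
    std::vector<T> buffers_;
    std::size_t index_;
};

int main()
{
    // Two slots make the ring a drop-in replacement for a dual buffer.
    CircularBuffer<std::string> frames(2, "empty");
    frames.current() = "frame A"; // draw into the active slot
    frames.next();                // switch to the other slot
    frames.current() = "frame B";
    frames.next();                // wraps back to the first slot
    assert(frames.current() == "frame A");
    return 0;
}
```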
14 changes: 7 additions & 7 deletions c_plus_plus/src/model/stream/video/input/camera_reader.cpp
@@ -48,10 +48,10 @@ CameraReader::CameraReader(const VideoConfig& videoConfig, std::size_t bufferCou

CameraReader::~CameraReader()
{
const std::unique_ptr<IndexedImage[]>& buffers = images_.buffers();
for (std::size_t i = 0; i < images_.size(); ++i)
{
unmapBuffer(buffers[i]);
unmapBuffer(images_.current());
images_.next();
}

close(fd_);
@@ -124,10 +124,10 @@ void CameraReader::requestBuffers(std::size_t bufferCount)
throw std::runtime_error("Failed to request buffers for camera " + videoConfig_.deviceName);
}

const std::unique_ptr<IndexedImage[]>& buffers = images_.buffers();
for (std::size_t i = 0; i < images_.size(); ++i)
{
mapBuffer(buffers[i]);
mapBuffer(images_.current());
images_.next();
}
}

@@ -213,9 +213,9 @@ void CameraReader::setImageFormat()
else if (fmt.fmt.pix.width != (unsigned int)videoConfig_.resolution.width ||
fmt.fmt.pix.height != (unsigned int)videoConfig_.resolution.height)
{
throw std::runtime_error("Camera does not support specified image size : " +
std::to_string(videoConfig_.resolution.width) + " x " +
std::to_string(videoConfig_.resolution.height));
throw std::runtime_error(
"Camera does not support specified image size : " + std::to_string(videoConfig_.resolution.width) + " x " +
std::to_string(videoConfig_.resolution.height));
}
else
{