#define _CRT_SECURE_NO_WARNINGS

#include <algorithm>   // std::min
#include <memory>
#include <thread>
#include <unordered_map>

// Sample
#include <framework/DataPath.hpp>
#include <framework/ProgramArguments.hpp>
#include <framework/SampleFramework.hpp>
#include <framework/MathUtils.hpp>
#include <framework/Checks.hpp>

// SAL
#include <dw/sensors/Sensors.h>
#include <dw/sensors/camera/Camera.h>

// CORE
#include <dw/core/Context.h>
#include <dw/core/Logger.h>

// Raw
#include <dw/isp/SoftISP.h>

// Renderer
#include <dw/renderer/Renderer.h>

// LaneDetector
#include <dw/lanes/LaneDetector.h>

// DNN
#include <dnn_common/DNNInference.hpp>

// FreeSpaceDetector
#include <dw/freespace/FreeSpaceDetector.h>

// IMAGE
#include <dw/image/FormatConverter.h>
#include <dw/image/ImageStreamer.h>

#include <string>

// Calibrated camera
#include <dw/rigconfiguration/RigConfiguration.h>
#include <dw/rigconfiguration/Camera.h>

// Input/Output
#include <dnn_common/ISensorIO.hpp>
#include <dnn_common/SensorIOCuda.hpp>

#ifdef VIBRANTE
#include <dnn_common/SensorIONvmedia.hpp>
#endif


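// CLaneObject combines three DriveWorks workloads into a single pipeline:
//   - LaneNet lane detection,
//   - TensorRT-based object detection (via DNNInference),
//   - FreeSpaceNet drivable free-space detection.
// Frames flow: camera/video input -> CUDA -> RGBA conversion -> detectors -> GL rendering.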
class CLaneObject{
public:
    // SDK
    dwContextHandle_t               m_context           = DW_NULL_HANDLE;   //one for all

    // Cameras
    std::string                     m_inputType;                            //for laneNet
    dwSALHandle_t                   m_sal               = DW_NULL_HANDLE;   //one for all
    dwSensorHandle_t                m_cameraSensor      = DW_NULL_HANDLE;   //one for all

    // Renderers
    dwRendererHandle_t              m_renderer          = DW_NULL_HANDLE;   //one for all
    dwRenderBufferHandle_t          m_lineBuffer        = DW_NULL_HANDLE;   //for laneNet
    dwRenderBufferHandle_t          m_boxLineBuffer     = DW_NULL_HANDLE;   //for obj detect
    dwRenderBufferHandle_t          m_freeLineBuffer;                       //for freespace

    // Streamer and converters
    bool                            m_isRaw;                                //for laneNet
    dwSoftISPHandle_t               m_softISP;                              //for laneNet
    cudaStream_t                    m_cudaStream;                           //Cuda Streamer

    dwImageCUDA *                   m_rgbaImage;                            //for object detection
    dwImageCUDA *                   m_yuvImage;                             //for object detection
    dwImageCUDA                     m_frameCUDArgba;                        //for laneNet
    dwImageCUDA                     m_frameCUDArcb;                         //for laneNet
    dwImageCUDA                     m_rcbImage;


    dwImageProperties               m_cameraImageProperties;
    dwCameraProperties              m_cameraProperties;

    dwImageStreamerHandle_t         m_cuda2gl           = DW_NULL_HANDLE;   //for laneNet; unused in this combined sample
    dwImageStreamerHandle_t         m_nvMedia2Cuda      = DW_NULL_HANDLE;   //for laneNet; unused in this combined sample
    dwImageFormatConverterHandle_t  m_yuv2rgba          = DW_NULL_HANDLE;   //for laneNet; unused in this combined sample

    dwImageStreamerHandle_t         m_streamerInput2CUDA;
    dwImageStreamerHandle_t         m_streamerCamera2GL;
    dwImageFormatConverterHandle_t  m_converterInput2Rgba;

    dwImageStreamerHandle_t         m_input2cuda;                           //redundant; unused

    //Screen geometry
    dwRect                          m_screenRectangle;

    uint32_t                        m_windowWidth;
    uint32_t                        m_windowHeight;

    uint32_t                        m_cameraWidth;        //for laneNet
    uint32_t                        m_cameraHeight;       //for laneNet

    float32_t                       m_drawScaleX;
    float32_t                       m_drawScaleY;

    // former sample globals promoted to members
    WindowBase*                     m_window;
    bool                            m_run;
    ProgramArguments                m_arguments;
    std::string                     m_title;

    // Lane Detector
    float32_t                       m_threshold;
    dwLaneDetectorHandle_t          m_laneDetector;
    dwRigConfigurationHandle_t      m_rigConfig       = DW_NULL_HANDLE;
    dwCameraRigHandle_t             m_cameraRig       = DW_NULL_HANDLE;
    dwCalibratedCameraHandle_t      m_calibratedCam   = DW_NULL_HANDLE;

    dwLaneDetection                 m_lanes{};

    // special to free space detection
    dwFreeSpaceDetectorHandle_t     m_freeSpaceDetector;
    dwFreeSpaceDetection            m_boundary;
    bool                            m_rig;
    uint32_t                        m_spatialSmoothFilterWidth;
    float32_t                       m_temporalSmoothFactor;

    // special to object detection
    std::unique_ptr<ISensorIO>      m_sensorIO;           //for object detection
    std::unique_ptr<DNNInference>   m_dnnInference;       //for object detection
    std::vector<dwBox2D>            m_dnnBoxList;         //for object detection

    // key-press callback
    void (*m_userKeyPressCallback)(int) = 0;

    CLaneObject(){
        m_window = nullptr;
        m_context = DW_NULL_HANDLE;
        m_inputType = "video";
        m_sal = DW_NULL_HANDLE;
        m_cameraSensor = DW_NULL_HANDLE;
        m_renderer = DW_NULL_HANDLE;
        m_freeLineBuffer                = DW_NULL_HANDLE;
        m_laneDetector = DW_NULL_HANDLE;
        m_isRaw = false;
        m_rig                           = false;
        m_run = false;
        m_frameCUDArgba = {};
        m_frameCUDArcb = {};
        m_cameraImageProperties = {};
        m_cameraProperties = {};
        m_softISP = DW_NULL_HANDLE;
        m_streamerInput2CUDA = DW_NULL_HANDLE;
        m_converterInput2Rgba = DW_NULL_HANDLE;
        m_streamerCamera2GL = DW_NULL_HANDLE;
        m_screenRectangle = {};
        m_cameraWidth = 0;
        m_cameraHeight = 0;
        m_threshold = 0;
        m_cudaStream = 0;
        m_lanes = {};

        m_spatialSmoothFilterWidth      = 5;
        m_temporalSmoothFactor          = 0.5f;  // was left uninitialized; 0.5 is an assumed reasonable default
        m_rgbaImage                     = nullptr;
        m_yuvImage                      = nullptr;
        m_rcbImage                      = {};
        m_freeSpaceDetector             = DW_NULL_HANDLE;
        m_boundary                      = {};
    }



public:

    // InitDriveworks()
    bool initDriveworks(){
        // create a logger to log to console
        // we keep the ownership of the logger at the application level
        dwLogger_initialize(getConsoleLoggerCallback(true));
        dwLogger_setLogLevel(DW_LOG_DEBUG);

        // instantiate Driveworks SDK context
        dwContextParameters     sdkParams{};

        std::string path = DataPath::get();
        sdkParams.dataPath = path.c_str();

#ifdef VIBRANTE
        sdkParams.eglDisplay = m_window->getEGLDisplay();
#endif
        return dwInitialize(&m_context, DW_VERSION, &sdkParams) == DW_SUCCESS;
    }// InitDriveworks()


    //create Video replay
    void createVideoReplay(dwSensorHandle_t &salSensor,
                           float32_t        &cameraFrameRate,
                           dwSALHandle_t    sal){

        //lanenet version
        dwSensorParams params{};
        std::string parameterString = m_arguments.parameterString();

        params.parameters           = parameterString.c_str();
        params.protocol = "camera.virtual";

        dwSAL_createSensor(&salSensor, params, sal);
        dwSensorCamera_getSensorProperties(&m_cameraProperties, salSensor);
        dwSensorCamera_getImageProperties(&m_cameraImageProperties,
                                          DW_CAMERA_PROCESSED_IMAGE,
                                          salSensor);

        m_cameraWidth = m_cameraImageProperties.width;
        m_cameraHeight = m_cameraImageProperties.height;
        cameraFrameRate = m_cameraProperties.framerate;
    }//create Video replay


    // initCameras()
    bool initCameras(){
        dwStatus result = DW_FAILURE;

        // create sensor abstraction layer
        result = dwSAL_initialize(&m_sal, m_context);
        if (result != DW_SUCCESS) {
            std::cerr << "Cannot init sal: " << dwGetStatusName(result) << std::endl;
            return false;
        }

        //create GMSL Camera interface
        float32_t   cameraFramerate = 0.0f;

        createVideoReplay(m_cameraSensor,
                          cameraFramerate,
                          m_sal);
        std::cout << "Camera image with " << m_cameraWidth << "x" << m_cameraHeight << " at "
                  << cameraFramerate << " FPS" << std::endl;

        m_sensorIO.reset(new SensorIOCuda(m_context, 0, m_cameraSensor, m_cameraWidth, m_cameraHeight));

        // the camera sensor is started once, after all modules are initialized (see initializeModules())
        return true;
    }

    // laneNet version setupRenderer
    void setupRenderer(dwRendererHandle_t &renderer, const dwRect &screenRect, dwContextHandle_t dwSdk){
        CHECK_DW_ERROR(dwRenderer_initialize(&renderer, dwSdk));

        float32_t boxColor[4] = {0.0f, 1.0f, 0.0f, 1.0f};
        dwRenderer_setColor(boxColor, renderer);
        dwRenderer_setLineWidth(2.0f, renderer);
        dwRenderer_setRect(screenRect, renderer);


    }//laneNet version setupRenderer

    // dnn version setupRenderer
    void setupRenderer(dwRendererHandle_t &renderer, dwContextHandle_t dwSdk){
        CHECK_DW_ERROR( dwRenderer_initialize(&renderer, dwSdk) );

        // 3x3 identity: the renderer applies no additional 2D transform
        float32_t rasterTransform[9] = {1.0f, 0.0f, 0.0f,
                                        0.0f, 1.0f, 0.0f,
                                        0.0f, 0.0f, 1.0f};

        dwRenderer_set2DTransform(rasterTransform, renderer);
        float32_t boxColor[4] = {0.0f,1.0f,0.0f,1.0f};
        dwRenderer_setColor(boxColor, renderer);
        dwRenderer_setLineWidth(2.0f, renderer);
        dwRenderer_setRect(m_screenRectangle, renderer);
    }

    // setupLineBuffer()
    void setupLineBuffer(dwRenderBufferHandle_t &lineBuffer, unsigned int maxLines, dwContextHandle_t dwSdk){
        dwRenderBufferVertexLayout layout;
        layout.posFormat   = DW_RENDER_FORMAT_R32G32_FLOAT;
        layout.posSemantic = DW_RENDER_SEMANTIC_POS_XY;
        layout.colFormat   = DW_RENDER_FORMAT_NULL;
        layout.colSemantic = DW_RENDER_SEMANTIC_COL_NULL;
        layout.texFormat   = DW_RENDER_FORMAT_NULL;
        layout.texSemantic = DW_RENDER_SEMANTIC_TEX_NULL;
        dwRenderBuffer_initialize(&lineBuffer, layout, DW_RENDER_PRIM_LINELIST, maxLines, dwSdk);
        dwRenderBuffer_set2DCoordNormalizationFactors((float32_t)m_cameraWidth,
                                                      (float32_t)m_cameraHeight, lineBuffer);
    }// setupLineBuffer()
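
    // The line buffers created above are filled by the draw*() helpers below using the
    // map/write/unmap pattern, roughly:
    //   float32_t *coords = nullptr; uint32_t maxVertices = 0, vertexStride = 0;
    //   dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, lineBuffer);
    //   ... write (x, y) pairs for a line list, advancing coords by vertexStride per vertex ...
    //   dwRenderBuffer_unmap(numVerticesWritten, lineBuffer);
    //   dwRenderer_renderBuffer(lineBuffer, renderer);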

    // initRenderer()
    bool initRenderer(){
        m_screenRectangle.height = m_window->height();
        m_screenRectangle.width = m_window->width();
        m_screenRectangle.x = 0;
        m_screenRectangle.y = 0;

        unsigned int maxLines = 20000;
        //laneNet version
        //setupRenderer(m_renderer, m_screenRectangle, m_context);
        //dnn version
        setupRenderer(m_renderer, m_context);

        setupLineBuffer(m_lineBuffer, maxLines, m_context);
        setupLineBuffer(m_boxLineBuffer, maxLines, m_context);
        setupLineBuffer(m_freeLineBuffer, maxLines, m_context);

        return true;
    }//InitRenderer()

    // InitRigConfiguration()
    bool initRigConfiguration(){
        dwStatus result = DW_SUCCESS;
        // load vehicle rig configuration
        result = dwRigConfiguration_initializeFromFile(&m_rigConfig, m_context, m_arguments.get("rig").c_str());
        if(result != DW_SUCCESS){
            std::cerr<<"Error dwRigConfiguration_initialize: " << dwGetStatusName(result)<<std::endl;
            return false;
        }

        uint32_t cameraCount;
        result = dwCameraRig_initializeFromConfig(&m_cameraRig, &cameraCount,
                                                  &m_calibratedCam,
                                                  1U,m_context, m_rigConfig);
        if(result != DW_SUCCESS){
            std::cerr << "Error dwCameraRig_initializeFromConfig: " << dwGetStatusName(result) << std::endl;
            return false;
        }
        return true;
    }    //InitRigConfiguration()
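
    // The rig file is supplied via the --rig argument (main() defaults it to
    // DataPath + "/samples/laneDetection/rig.xml"); it provides the calibrated camera and the
    // sensor-to-rig transformation consumed by both LaneNet and FreeSpaceNet below.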

    // initPipeline()
    bool initPipeline(){
        dwStatus status = DW_FAILURE;

        // query the processed (non-raw) image properties from the camera
        dwImageProperties cameraImageProperties = m_cameraImageProperties;
        status = dwSensorCamera_getImageProperties(&cameraImageProperties, DW_CAMERA_PROCESSED_IMAGE, m_cameraSensor);
        if (status != DW_SUCCESS) {
            std::cerr << "Cannot get image properties: " << dwGetStatusName(status) << std::endl;
            return false;
        }
        cameraImageProperties.type = DW_IMAGE_CUDA;

        // Input -> RGBA format converter
        dwImageProperties displayImageProperties = cameraImageProperties;
        displayImageProperties.pxlFormat  = DW_IMAGE_RGBA;
        displayImageProperties.pxlType    = DW_TYPE_UINT8;
        displayImageProperties.planeCount = 1;
        displayImageProperties.type       = DW_IMAGE_CUDA;

        status = dwImageFormatConverter_initialize(&m_converterInput2Rgba, cameraImageProperties.type, m_context);
        if (status != DW_SUCCESS) {
            std::cerr << "Cannot initialize input -> rgba format converter: " << dwGetStatusName(status) << std::endl;
            return false;
        }
        // Camera -> GL image streamer
        status = dwImageStreamer_initialize(&m_streamerCamera2GL, &displayImageProperties, DW_IMAGE_GL, m_context);

        if (status != DW_SUCCESS) {
            std::cerr << "Cannot init GL streamer: " << dwGetStatusName(status) << std::endl;
            return false;
        }

        // Setup RGBA CUDA image
        {
            void *dptr = nullptr;
            size_t pitch = 0;
            cudaMallocPitch(&dptr, &pitch, m_cameraWidth * 4, m_cameraHeight);
            dwImageCUDA_setFromPitch(&m_frameCUDArgba, dptr, m_cameraWidth, m_cameraHeight,
                                     pitch, DW_IMAGE_RGBA);
        }
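        // Note: the pitch-allocated buffer above is owned by m_frameCUDArgba and is
        // released in releaseModules() via cudaFree(m_frameCUDArgba.dptr[0]).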
        // Set camera width and height for DNN
        m_cameraWidth = cameraImageProperties.width;
        m_cameraHeight = cameraImageProperties.height;


        return true;
    }// initPipeline()

    // initLaneNetWithRig()
    bool initLaneNetWithRig(){
        dwStatus res = DW_FAILURE;
        dwTransformation transformation{};
        res = dwRigConfiguration_getSensorToRigTransformation(&transformation, 0, m_rigConfig);
        if(res != DW_SUCCESS){
            std::cerr << "Cannot parse rig configuration: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        float32_t maxDistance = 50.0f;
        std::string maxDistanceStr = m_arguments.get("maxDistance");
        if(maxDistanceStr != "50.0"){
            try{
                maxDistance = std::stof(maxDistanceStr);
                if(maxDistance < 0.0f){
                    std::cerr << "maxDistance cannot be negative." << std::endl;
                    return false;
                }
            }catch(...){
                std::cerr << "Given maxDistance can't be parsed" << std::endl;
                return false;
            }
        }
        res = dwLaneDetector_initializeLaneNet(&m_laneDetector,
                                               m_cameraWidth, m_cameraHeight,
                                               m_context);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot initialize LaneNet: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        res = dwLaneDetector_setCameraHandle(m_calibratedCam, m_laneDetector);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot initialize lane detector calibrated camera: "
                      << dwGetStatusName(res) << std::endl;
            return false;
        }

        res = dwLaneDetector_setCameraExtrinsics(transformation, m_laneDetector);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot initialize lane detector camera extrinsics: "
                      << dwGetStatusName(res) << std::endl;
            return false;
        }

        res = dwLaneDetector_setMaxLaneDistance(maxDistance, m_laneDetector);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot set lane detector maximum detection distance in meter: "
                      << dwGetStatusName(res) << std::endl;
            return false;
        }

        float32_t fov;
        res = dwCalibratedCamera_getHorizontalFOV(&fov, m_calibratedCam);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot get camera horizontal FOV: "
                      << dwGetStatusName(res) << std::endl;
            return false;
        }

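        // the calibrated camera reports the horizontal FOV in radians; LaneNet expects degrees,
        // hence the RAD2DEG conversion below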
        res =  dwLaneDetectorLaneNet_setHorizontalFOV(RAD2DEG(fov), m_laneDetector);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot set camera horizontal FOV: "
                      << dwGetStatusName(res) << std::endl;
            return false;
        }

        m_threshold = 0.3f;
        std::string inputThreshold = m_arguments.get("threshold");
        if(inputThreshold!="0.3"){
            try{
                m_threshold = std::stof(inputThreshold);
            } catch(...) {
                std::cerr << "Given threshold can't be parsed" << std::endl;
                return false;
            }
        }

        res = dwLaneDetectorLaneNet_setDetectionThreshold(m_threshold, m_laneDetector);
        if (res != DW_SUCCESS){
            std::cerr << "Cannot set LaneNet threshold: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        return true;
    }// initLaneNetWithRig()

    bool initDNN(){
        m_dnnInference.reset(new DNNInference(m_context));

        std::string tensorRTModel = m_arguments.get("tensorRT_model");
        std::cout<<"Initializing TensorRT Network: " <<tensorRTModel.c_str()<<std::endl;

        m_dnnInference->buildFromTensorRT(tensorRTModel.c_str());

        // for freespace
        dwStatus res = DW_FAILURE;

        dwTransformation transformation{};
        res = dwRigConfiguration_getSensorToRigTransformation(&transformation, 0, m_rigConfig);
        if (res != DW_SUCCESS) //only compute free space boundary in image space
        {
            std::cerr << "Cannot parse rig configuration: " << dwGetStatusName(res) <<std::endl;
            std::cerr << "Compute free space boundary in image space only." <<std::endl;
            res = dwFreeSpaceDetector_initializeFreeSpaceNet(&m_freeSpaceDetector,
                                                             m_cameraWidth, m_cameraHeight,
                                                             m_cudaStream,
                                                             m_context);
        }
        else //compute free space boundary in image and vehicle coordinates
        {
            float32_t maxDistance = 50.0f;
            std::string maxDistanceStr = m_arguments.get("maxDistance");
            if(maxDistanceStr!="50.0") {
                try{
                    maxDistance = std::stof(maxDistanceStr);
                    if (maxDistance < 0.0f) {
                        std::cerr << "maxDistance cannot be negative." << std::endl;
                        return false;
                    }
                } catch(...) {
                    std::cerr << "Given maxDistance can't be parsed" << std::endl;
                    return false;
                }
            }
            res = dwFreeSpaceDetector_initializeCalibratedFreeSpaceNet(&m_freeSpaceDetector,
                                                                       m_cameraWidth, m_cameraHeight,
                                                                       m_cudaStream, transformation, maxDistance,
                                                                       m_calibratedCam, m_context);
        }

        if (m_isRaw){
            // this is in case the network requires a different tonemapper than the default one
            dwDNNMetaData metaData;
            dwFreeSpaceDetector_getDNNMetaData(&metaData, m_freeSpaceDetector);
            dwSoftISP_setTonemapType(metaData.tonemapType, m_softISP);
        }

        if (res != DW_SUCCESS)
        {
            std::cerr << "Cannot initialize FreeSpaceNet: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        // configure the free space boundary spatial smoothing filter width
        res = dwFreeSpaceDetector_setSpatialSmoothFilterWidth(m_spatialSmoothFilterWidth, m_freeSpaceDetector);
        if (res != DW_SUCCESS)
        {
            std::cerr << "Cannot set free space boundary spatial smooth filter: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        // customize ROI to a subset in the center of the frame
        dwRect roi = {int32_t(m_cameraWidth)/10,   int32_t(m_cameraHeight)/10,
                      int32_t(m_cameraWidth)*8/10, int32_t(m_cameraHeight)*8/10};
        res = dwFreeSpaceDetector_setDetectionROI(&roi, m_freeSpaceDetector);
        if (res != DW_SUCCESS)
        {
            std::cerr << "Cannot set FreeSpaceNet detection ROI: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        // customize free space boundary temporal smoothing factor
        res = dwFreeSpaceDetector_setTemporalSmoothFactor(m_temporalSmoothFactor, m_freeSpaceDetector);
        if (res != DW_SUCCESS)
        {
            std::cerr << "Cannot set free space boundary temporal smooth factor: " << dwGetStatusName(res) << std::endl;
            return false;
        }

        // boundary points are in camera space

        m_drawScaleX = static_cast<float32_t>(m_windowWidth)/static_cast<float32_t>(m_cameraWidth);
        m_drawScaleY = static_cast<float32_t>(m_windowHeight)/static_cast<float32_t>(m_cameraHeight);
        std::cout<<"windowWidth: "<<m_windowWidth<<" cameraWidth: "<<m_cameraWidth<<" drawScaleX: "<<m_drawScaleX<<std::endl;
        std::cout<<"windowWidth/cameraWidth: "<<static_cast<float32_t>(m_windowWidth)/static_cast<float32_t>(m_cameraWidth)<<std::endl;

        return true;
    }


public:
    bool initializeModules(){
        if(!initDriveworks())       return false;
        if(!initCameras())          return false;
        if(!initRenderer())         return false;
        m_rig = initRigConfiguration();
        if(!m_rig) return false;
        if(!initPipeline())         return false;
        if(!initLaneNetWithRig())   return false;
        if(!initDNN())              return false;

        m_run = m_run && dwSensor_start(m_cameraSensor) == DW_SUCCESS;
        return true;
    }

    //initSampleApp()
    bool initSampleApp( int argc, const char **argv,
                        const ProgramArguments* args,
                        void (*userKeyPressCallback)(int),
                        uint32_t width, uint32_t height)
    {
        //gUserKeyPressCallback = userKeyPressCallback;
        m_userKeyPressCallback = userKeyPressCallback;
    #if (!WINDOWS)
        struct sigaction action = {};
        action.sa_handler = sig_int_handler;

        sigaction(SIGHUP, &action, NULL);  // controlling terminal closed, Ctrl-D
        sigaction(SIGINT, &action, NULL);  // Ctrl-C
        sigaction(SIGQUIT, &action, NULL); // Ctrl-\, clean quit with core dump
        sigaction(SIGABRT, &action, NULL); // abort() called.
        sigaction(SIGTERM, &action, NULL); // kill command
        sigaction(SIGSTOP, &action, NULL); // note: SIGSTOP cannot be caught; this call has no effect
    #endif
        m_windowWidth = width;
        m_windowHeight = height;

        if (args)
        {
            m_arguments = *args;

            if (!m_arguments.parse(argc, argv)){
                exit(1); // Exit if not all required args are provided
            }

            std::string argstring = m_arguments.printList();
            if (argstring.size() > 0) {
                std::cout << "Program args:\n" << argstring << std::endl;
            }
        }
        m_run = true;

        // Setup Window for Output, initialize the GL and GLFW
        try {
            m_window = m_window ? m_window : new WindowGLFW(width, height);
        }
        catch (const std::exception &/*ex*/) {
            m_window = nullptr;
        }

        if (m_window == nullptr)
            return false;

        m_window->makeCurrent();
        //m_window->setOnKeypressCallback(keyPressCallback);
        //m_window->setOnKeypressCallback(keyPressCB);
        return true;
    }//initSampleApp()

    //drawLaneDetectionROI()
    void drawLaneDetectionROI(dwRenderBufferHandle_t renderBuffer, dwRendererHandle_t renderer){
        dwRect roi{};
        dwLaneDetector_getDetectionROI(&roi, m_laneDetector);
        float32_t x_start = static_cast<float32_t>(roi.x);
        float32_t x_end   = static_cast<float32_t>(roi.x + roi.width);
        float32_t y_start = static_cast<float32_t>(roi.y);
        float32_t y_end   = static_cast<float32_t>(roi.y + roi.height);
        float32_t *coords     = nullptr;
        uint32_t maxVertices  = 0;
        uint32_t vertexStride = 0;
        dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, renderBuffer);
        coords[0]  = x_start;
        coords[1]  = y_start;
        coords    += vertexStride;
        coords[0]  = x_start;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0]  = x_start;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0]  = x_end;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0]  = x_end;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0] = x_end;
        coords[1] = y_start;
        coords    += vertexStride;
        coords[0] = x_end;
        coords[1] = y_start;
        coords    += vertexStride;
        coords[0] = x_start;
        coords[1] = y_start;
        dwRenderBuffer_unmap(8, renderBuffer);
        dwRenderer_setColor(DW_RENDERER_COLOR_YELLOW, renderer);
        dwRenderer_setLineWidth(2, renderer);
        dwRenderer_renderBuffer(renderBuffer, renderer);
    }//drawLaneDetectionROI()

    //drawLaneMarkingsCustomColor()
    void drawLaneMarkingsCustomColor(float32_t laneColors[][4], uint32_t nColors,
                                              const dwLaneDetection &lanes, float32_t laneWidth,
                                              dwRenderBufferHandle_t renderBuffer, dwRendererHandle_t renderer){
        //drawLaneDetectionROI(renderBuffer, renderer);

        for (uint32_t i = 0; i < lanes.numLaneMarkings; ++i) {

            const dwLaneMarking& laneMarking = lanes.laneMarkings[i];

            dwLanePositionType category = laneMarking.positionType;

            if(category==DW_LANEMARK_POSITION_ADJACENT_LEFT)
                dwRenderer_setColor(laneColors[0 % nColors], renderer);
            else if(category==DW_LANEMARK_POSITION_EGO_LEFT)
                dwRenderer_setColor(laneColors[1 % nColors], renderer);
            else if(category==DW_LANEMARK_POSITION_EGO_RIGHT)
                dwRenderer_setColor(laneColors[2 % nColors], renderer);
            else if(category==DW_LANEMARK_POSITION_ADJACENT_RIGHT)
                dwRenderer_setColor(laneColors[3 % nColors], renderer);
            else
                dwRenderer_setColor(laneColors[4 % nColors], renderer);

            dwRenderer_setLineWidth(laneWidth, renderer);

            float32_t* coords = nullptr;
            uint32_t maxVertices = 0;
            uint32_t vertexStride = 0;
            dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, renderBuffer);

            uint32_t n_verts = 0;
            dwVector2f previousP{};
            bool firstPoint = true;

            for (uint32_t j = 0; j < laneMarking.numPoints; ++j) {

                dwVector2f center;
                center.x = laneMarking.imagePoints[j].x;
                center.y = laneMarking.imagePoints[j].y;

                if (firstPoint) { // Special case for the first point
                    previousP = center;
                    firstPoint = false;
                }
                else {
                    n_verts += 2;
                    if(n_verts > maxVertices)
                        break;

                    coords[0] = static_cast<float32_t>(previousP.x);
                    coords[1] = static_cast<float32_t>(previousP.y);
                    coords += vertexStride;

                    coords[0] = static_cast<float32_t>(center.x);
                    coords[1] = static_cast<float32_t>(center.y);
                    coords += vertexStride;

                    previousP = center;
                }
            }
            dwRenderBuffer_unmap(n_verts, renderBuffer);
            dwRenderer_renderBuffer(renderBuffer, renderer);
        }
    }//drawLaneMarkingsCustomColor()

    //drawLaneMarkings()
    void drawLaneMarkings(const dwLaneDetection &lanes, float32_t laneWidth,
                                   dwRenderBufferHandle_t renderBuffer, dwRendererHandle_t renderer){
        const float32_t DW_RENDERER_COLOR_DARKYELLOW[4]  = { 180.0f/255.0f, 180.0f/255.0f,  10.0f/255.0f, 1.0f};
        //const float32_t DW_RENDERER_COLOR_CYAN[4]      = {   10.0f/255.0f,   230.0f/255.0f,   230.0f/255.0f, 1.0f};

        float32_t colors[5][4];

        //memcpy(colors[0], DW_RENDERER_COLOR_CYAN, sizeof(colors[0]));
        //memcpy(colors[1], DW_RENDERER_COLOR_RED, sizeof(colors[1]));
        memcpy(colors[0], DW_RENDERER_COLOR_BLUE, sizeof(colors[0]));
        memcpy(colors[1], DW_RENDERER_COLOR_GREEN, sizeof(colors[1]));
        memcpy(colors[2], DW_RENDERER_COLOR_GREEN, sizeof(colors[2]));
        memcpy(colors[3], DW_RENDERER_COLOR_BLUE, sizeof(colors[3]));
        memcpy(colors[4], DW_RENDERER_COLOR_DARKYELLOW, sizeof(colors[4]));

        drawLaneMarkingsCustomColor(colors, 5, lanes, laneWidth, renderBuffer, renderer);
    }//drawLaneMarkings()


    //drawFreeSpaceDetectionROI()
    void drawFreeSpaceDetectionROI(dwRenderBufferHandle_t renderBuffer, dwRendererHandle_t renderer)
    {
        dwRect roi{};
        dwFreeSpaceDetector_getDetectionROI(&roi,m_freeSpaceDetector);
        //float32_t x_start = static_cast<float32_t>(roi.x)*m_drawScaleX;
        //float32_t x_end   = static_cast<float32_t>(roi.x + roi.width)*m_drawScaleX;
        //float32_t y_start = static_cast<float32_t>(roi.y)*m_drawScaleY;
        //float32_t y_end   = static_cast<float32_t>(roi.y + roi.height)*m_drawScaleY;
        float32_t x_start = static_cast<float32_t>(roi.x);
        float32_t x_end   = static_cast<float32_t>(roi.x + roi.width);
        float32_t y_start = static_cast<float32_t>(roi.y);
        float32_t y_end   = static_cast<float32_t>(roi.y + roi.height);
        float32_t *coords     = nullptr;
        uint32_t maxVertices  = 0;
        uint32_t vertexStride = 0;
        dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, renderBuffer);
        coords[0]  = x_start;
        coords[1]  = y_start;
        coords    += vertexStride;
        coords[0]  = x_start;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0]  = x_start;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0]  = x_end;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0]  = x_end;
        coords[1]  = y_end;
        coords    += vertexStride;
        coords[0] = x_end;
        coords[1] = y_start;
        coords    += vertexStride;
        coords[0] = x_end;
        coords[1] = y_start;
        coords    += vertexStride;
        coords[0] = x_start;
        coords[1] = y_start;
        dwRenderBuffer_unmap(8, renderBuffer);
        dwRenderer_setColor(DW_RENDERER_COLOR_YELLOW, renderer);
        dwRenderer_setLineWidth(2, renderer);
        dwRenderer_renderBuffer(renderBuffer, renderer);
    }//drawFreeSpaceDetectionROI()

    //drawFreeSpaceBoundary()
    void drawFreeSpaceBoundary(dwFreeSpaceDetection* boundary, dwRenderBufferHandle_t renderBuffer, dwRendererHandle_t renderer)
    {
        drawFreeSpaceDetectionROI(renderBuffer, renderer);

        uint32_t n_verts = 0;
        float32_t* coords= nullptr;
        uint32_t maxVertices = 0;
        uint32_t vertexStride = 0;
        dwFreeSpaceBoundaryType category = boundary->boundaryType[0];
        float32_t maxWidth = 8.0f;     // line width for the nearest distance bin, [0, 10) m
        float32_t widthRatio = 0.8f;   // each further 10 m bin shrinks the line width by this factor
        float32_t dist2Width[20];      // bin i covers distances [10*i, 10*(i+1)) meters
        dist2Width[0] = maxWidth;
        for(uint32_t i = 1; i < 20; i++)
            dist2Width[i] = dist2Width[i-1]*widthRatio;

        float32_t prevWidth, curWidth = maxWidth/2;
        if(m_rig) {
            // clamp the distance bin so points beyond 200 m cannot index out of bounds
            prevWidth = dist2Width[std::min(static_cast<uint32_t>(boundary->boundaryWorldPoint[0].x/10), 19u)];
        } else {
            prevWidth = curWidth;
        }

        dwRenderer_setLineWidth(prevWidth, renderer);

        if(category==DW_BOUNDARY_TYPE_OTHER)
            dwRenderer_setColor(DW_RENDERER_COLOR_YELLOW, renderer);
        else if(category==DW_BOUNDARY_TYPE_CURB)
            dwRenderer_setColor(DW_RENDERER_COLOR_YELLOW, renderer);
            //dwRenderer_setColor(DW_RENDERER_COLOR_GREEN, renderer);
        else if(category==DW_BOUNDARY_TYPE_VEHICLE)
            dwRenderer_setColor(DW_RENDERER_COLOR_RED, renderer);
        else if(category==DW_BOUNDARY_TYPE_PERSON)
            dwRenderer_setColor(DW_RENDERER_COLOR_BLUE, renderer);

        dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, renderBuffer);

        for (uint32_t i = 1; i < boundary->numberOfBoundaryPoints; ++i) {
            if(m_rig) {
                curWidth = dist2Width[std::min(static_cast<uint32_t>(boundary->boundaryWorldPoint[i].x/10), 19u)];
            }

            if(boundary->boundaryType[i] != boundary->boundaryType[i-1] || curWidth != prevWidth) {
                dwRenderBuffer_unmap(n_verts, renderBuffer);
                dwRenderer_renderBuffer(renderBuffer, renderer);

                coords = nullptr;
                maxVertices = 0;
                vertexStride = 0;
                n_verts = 0;
                dwRenderer_setLineWidth(curWidth, renderer);

                category = boundary->boundaryType[i];
                if(category==DW_BOUNDARY_TYPE_OTHER)
                    dwRenderer_setColor(DW_RENDERER_COLOR_YELLOW, renderer);
                else if(category==DW_BOUNDARY_TYPE_CURB)
                    dwRenderer_setColor(DW_RENDERER_COLOR_YELLOW, renderer);
                      //dwRenderer_setColor(DW_RENDERER_COLOR_GREEN, renderer);
                else if(category==DW_BOUNDARY_TYPE_VEHICLE)
                      dwRenderer_setColor(DW_RENDERER_COLOR_RED, renderer);
                else if(category==DW_BOUNDARY_TYPE_PERSON)
                      dwRenderer_setColor(DW_RENDERER_COLOR_BLUE, renderer);

                dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, renderBuffer);
            }
            n_verts += 2;
            if(n_verts > maxVertices)
                break;

            /*coords[0] = static_cast<float32_t>(boundary->boundaryImagePoint[i-1].x*m_drawScaleX);
            coords[1] = static_cast<float32_t>(boundary->boundaryImagePoint[i-1].y*m_drawScaleY);
            coords += vertexStride;

            coords[0] = static_cast<float32_t>(boundary->boundaryImagePoint[i].x*m_drawScaleX);
            coords[1] = static_cast<float32_t>(boundary->boundaryImagePoint[i].y*m_drawScaleY);
            coords += vertexStride;
            prevWidth = curWidth;*/

            coords[0] = static_cast<float32_t>(boundary->boundaryImagePoint[i-1].x);
            coords[1] = static_cast<float32_t>(boundary->boundaryImagePoint[i-1].y);
            coords += vertexStride;

            coords[0] = static_cast<float32_t>(boundary->boundaryImagePoint[i].x);
            coords[1] = static_cast<float32_t>(boundary->boundaryImagePoint[i].y);
            coords += vertexStride;
            prevWidth = curWidth;
        }

        dwRenderBuffer_unmap(n_verts, renderBuffer);
        dwRenderer_renderBuffer(renderBuffer, renderer);
    }//drawFreeSpaceBoundary()
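
    // drawFreeSpaceBoundary() flushes (unmap + render) and re-maps the render buffer whenever the
    // boundary type or line width changes, so each contiguous segment run is drawn with a single
    // color and width.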

    //runDetector()
    void runDetector(dwImageCUDA* frame, dwLaneDetectorHandle_t laneDetector){
        // Run inference if the model is valid
        if (laneDetector){
            //dwLaneDetection lanes{};
            dwStatus res = dwLaneDetector_processDeviceAsync(frame, laneDetector);
            res = res == DW_SUCCESS ? dwLaneDetector_interpretHost(laneDetector) : res;
            if (res != DW_SUCCESS){
                std::cerr << "runDetector failed with: " << dwGetStatusName(res) << std::endl;
            }

            dwLaneDetector_getLaneDetections(&m_lanes, laneDetector);
            //drawLaneMarkings(m_lanes, 6.0f, m_lineBuffer, m_renderer);
        }
    }//runDetector()

    //drawBoxes()
    void drawBoxes(const std::vector<dwBox2D> &boxes, const std::vector<uint32_t> *boxIds,
                   float32_t normalizationWidth, float32_t normalizationHeight,
                   dwRenderBufferHandle_t renderBuffer, dwRendererHandle_t renderer)
    {
        if (boxes.size() == 0)
            return;

        bool renderText = boxIds && boxIds->size() == boxes.size();
        dwRenderer_setFont(DW_RENDER_FONT_VERDANA_20, renderer);
        char idString[64];

        float32_t* coords = nullptr;
        uint32_t maxVertices = 0;
        uint32_t vertexStride = 0;
        dwRenderBuffer_map(&coords, &maxVertices, &vertexStride, renderBuffer);

        uint32_t n_boxes    = static_cast<uint32_t>(boxes.size());
        uint32_t n_verts    = 8 * n_boxes;
        if (n_verts > maxVertices)
        {
            n_boxes = maxVertices / 8;
            n_verts = 8 * n_boxes;
        }

        dwRect screenRectangle;
        dwRenderer_getRect(&screenRectangle, renderer);
        float32_t screenWidth = static_cast<float32_t>(screenRectangle.width);
        float32_t screenHeight = static_cast<float32_t>(screenRectangle.height);

        for (uint32_t i = 0U; i < n_boxes; ++i)
        {
            // transform pixel coords into rendered rectangle
            float32_t x_start = static_cast<float32_t>(boxes[i].x) - 0.5f;
            float32_t y_start = static_cast<float32_t>(boxes[i].y) - 0.5f;
            float32_t x_end   = static_cast<float32_t>(boxes[i].width) + x_start;
            float32_t y_end   = static_cast<float32_t>(boxes[i].height) + y_start;

            coords[0] = x_start;
            coords[1] = y_start;
            coords += vertexStride;

            coords[0] = x_start;
            coords[1] = y_end;
            coords += vertexStride;

            coords[0] = x_start;
            coords[1] = y_end;
            coords += vertexStride;

            coords[0] = x_end;
            coords[1] = y_end;
            coords += vertexStride;

            coords[0] = x_end;
            coords[1] = y_end;
            coords += vertexStride;

            coords[0] = x_end;
            coords[1] = y_start;
            coords += vertexStride;

            coords[0] = x_end;
            coords[1] = y_start;
            coords += vertexStride;

            coords[0] = x_start;
            coords[1] = y_start;
            coords += vertexStride;

            if (renderText)
            {
    #ifndef _MSC_VER
                sprintf(idString, "%u", (*boxIds)[i]);
    #else
                sprintf_s(idString, "%u", (*boxIds)[i]);
    #endif
                dwRenderer_renderText(static_cast<int32_t>((x_start * screenWidth) / normalizationWidth),
                                      screenRectangle.height -
                                      static_cast<int32_t>((y_start * screenHeight) / normalizationHeight),
                                      idString, renderer);
            }
        }
        dwRenderBuffer_unmap(n_verts, renderBuffer);
        dwRenderer_renderBuffer(renderBuffer, renderer);
    }//drawBoxes()

    // object detector
    void runDetector()
    {
        // Run inference if the model is valid
        if (m_dnnInference->isLoaded()) {
            m_dnnBoxList.clear();
            m_dnnInference->inferSingleFrame(&m_dnnBoxList, m_rgbaImage, true);
        }
    }
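
    // Note: m_rgbaImage must point to the current RGBA CUDA frame (set in the main loop from
    // m_sensorIO->getCudaRgba()) before the object-detector overload above is called.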

    //freespace detector
    void runDetector(dwImageCUDA* frame)
    {
        // Run inference if the model is valid
        if (m_freeSpaceDetector)
        {
            //dwFreeSpaceDetection boundary{};

            dwStatus res = dwFreeSpaceDetector_processDeviceAsync(frame, m_freeSpaceDetector);
            res = res == DW_SUCCESS ? dwFreeSpaceDetector_interpretHost(m_freeSpaceDetector) : res;

            if (res != DW_SUCCESS)
            {
                std::cerr << "runDetector failed with: " << dwGetStatusName(res) << std::endl;
            }
            else
                dwFreeSpaceDetector_getBoundaryDetection(&m_boundary, m_freeSpaceDetector);
        }
        //drawFreeSpaceBoundary(&m_boundary, m_freeLineBuffer, m_renderer);
    }

    //renderCameraTexture()
    void renderCameraTexture(dwImageStreamerHandle_t streamer, dwRendererHandle_t renderer){
        dwImageGL *frameGL = nullptr;

        if (dwImageStreamer_receiveGL(&frameGL, 30000, streamer) != DW_SUCCESS) {
            std::cerr << "did not received GL frame within 30ms" << std::endl;
        } else {
            // render received texture
            dwRenderer_renderTexture(frameGL->tex, frameGL->target, renderer);
            dwImageStreamer_returnReceivedGL(frameGL, streamer);
        }
    }//renderCameraTexture()

    //runSingleCameraPipelineH264()
    dwStatus runSingleCameraPipelineH264(){
        dwStatus result                 = DW_FAILURE;
        dwCameraFrameHandle_t frame     = nullptr;
        dwImageCUDA *frameCUDAyuv       = nullptr;
        dwImageCUDA *retimg             = nullptr;

        result = dwSensorCamera_readFrame(&frame, 0, 50000, m_cameraSensor);
        if (result == DW_END_OF_STREAM)
            return result;
        if (result != DW_SUCCESS) {
            std::cout << "readFrameCUDA: " << dwGetStatusName(result) << std::endl;
            return result;
        }


        result = dwSensorCamera_getImageCUDA(&frameCUDAyuv, DW_CAMERA_PROCESSED_IMAGE, frame);
        if (result != DW_SUCCESS) {
            std::cout << "getImage: " << dwGetStatusName(result) << std::endl;
            return result;
        }

        // YUV->RGBA

        result = dwImageFormatConverter_copyConvertCUDA(&m_frameCUDArgba, frameCUDAyuv, m_converterInput2Rgba, 0);
        if (result != DW_SUCCESS) {
            std::cout << "Cannot convert to RGBA: " << dwGetStatusName(result) << std::endl;
            return result;
        }

        // we can return the frame already now, we are working with a copy from now on
        dwSensorCamera_returnFrame(&frame);

        // frame -> GL (rgba) - for rendering
        {
            result = dwImageStreamer_postCUDA(&m_frameCUDArgba, m_streamerCamera2GL);
            if (result != DW_SUCCESS) {
                std::cerr << "cannot post RGBA image" << dwGetStatusName(result) << std::endl;
                return result;
            }

            renderCameraTexture(m_streamerCamera2GL, m_renderer);

            result = dwImageStreamer_waitPostedCUDA(&retimg, 60000, m_streamerCamera2GL);
            if (result != DW_SUCCESS) {
                std::cerr << "Cannot wait post RGBA image" << dwGetStatusName(result) << std::endl;
                return result;
            }
        }
        runDetector(&m_frameCUDArgba, m_laneDetector);
        runDetector(&m_frameCUDArgba);

        return DW_SUCCESS;
    }
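
    // Only the processed (H.264) camera path is implemented above; the raw-frame path from the
    // laneNet sample is left out (see the commented-out branch in runSingleCameraPipeline() below).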


    //runSingleCameraPipeline()
    dwStatus runSingleCameraPipeline(){
        dwStatus status = DW_SUCCESS;
        /*if(m_isRaw)
            status  = runSingleCameraPipelineRaw();
        else*/
            status = runSingleCameraPipelineH264();

        if (status == DW_END_OF_STREAM) {
            std::cout << "Camera reached end of stream" << std::endl;
            dwSensor_reset(m_cameraSensor);
            m_run = false;
        }
        else if (status != DW_SUCCESS) {
            m_run = false;
        }

        return status;
    }//runSingleCameraPipeline()


    // releaseModules()
    void releaseModules(){

        // release rig configuration, camera rig and calibrated camera
        dwCalibratedCamera_release(&m_calibratedCam);
        dwRigConfiguration_release(&m_rigConfig);
        dwCameraRig_release(&m_cameraRig);


        // release streamers, format converters and image buffers
        if (m_converterInput2Rgba != DW_NULL_HANDLE)
            dwImageFormatConverter_release(&m_converterInput2Rgba);

        if (m_frameCUDArgba.dptr[0])
            cudaFree(m_frameCUDArgba.dptr[0]);

        if (m_streamerCamera2GL != DW_NULL_HANDLE)
            dwImageStreamer_release(&m_streamerCamera2GL);

        if(m_isRaw){
            if (m_streamerInput2CUDA != DW_NULL_HANDLE)
                dwImageStreamer_release(&m_streamerInput2CUDA);
            if (m_softISP != DW_NULL_HANDLE)
                dwSoftISP_release(&m_softISP);
            if (m_frameCUDArcb.dptr[0])
                cudaFree(m_frameCUDArcb.dptr[0]);
        }

    #ifdef DW_USE_NVMEDIA
        // NOTE: these NvMedia members (m_converterNvMYuv2rgba, m_streamerNvMedia2CUDA,
        // m_frameNVMrgba) are not declared in CLaneObject; they must be added before
        // building with DW_USE_NVMEDIA defined.
        if (m_converterNvMYuv2rgba != DW_NULL_HANDLE)
            dwImageFormatConverter_release(&m_converterNvMYuv2rgba);

        if (m_streamerNvMedia2CUDA != DW_NULL_HANDLE)
            dwImageStreamer_release(&m_streamerNvMedia2CUDA);

        if (m_frameNVMrgba.img != nullptr)
            NvMediaImageDestroy(m_frameNVMrgba.img);
    #endif

        dwSensor_stop(m_cameraSensor);
        dwSAL_releaseSensor(&m_cameraSensor);

        dwRenderBuffer_release(&m_lineBuffer);
        dwRenderBuffer_release(&m_boxLineBuffer);
        dwRenderBuffer_release(&m_freeLineBuffer);
        dwRenderer_release(&m_renderer);

        // release used objects in correct order
        dwSAL_release(&m_sal);
        dwLaneDetector_release(&m_laneDetector);
        dwFreeSpaceDetector_release(&m_freeSpaceDetector);
        dwRelease(&m_context);
        dwLogger_release();

    }

    // releaseSampleApp()
    void releaseSampleApp(){
        // Shutdown
        delete m_window;
        m_window = nullptr;
    }

    void plotLaneAndObjects(){

        //plot objects
        float32_t boxColor[4] = {0.0f,1.0f,0.0f,1.0f};
        dwRenderer_setColor(boxColor, m_renderer);
        dwRenderer_setLineWidth(2.0f, m_renderer);
        drawBoxes(m_dnnBoxList, NULL, static_cast<float32_t>(m_cameraWidth),
                  static_cast<float32_t>(m_cameraHeight), m_boxLineBuffer, m_renderer);
        drawLaneMarkings(m_lanes, 4.0f, m_lineBuffer, m_renderer);


        //plot freespace
        drawFreeSpaceBoundary(&m_boundary, m_freeLineBuffer, m_renderer);

    }
};//class CLaneObject


int main(int argc, const char **argv){
    //dnn object detection sample

    //laneNet sample
    const ProgramArguments args = ProgramArguments({
        ProgramArguments::Option_t("video",
                                   (DataPath::get() +
                //std::string{"/samples/laneDetection/video_lane.h264"}).c_str()),
                std::string{"/samples/localization/camera.h264"}).c_str()),

        ProgramArguments::Option_t("tensorRT_model",
                                   (DataPath::get() +
                                    std::string{"/samples/detector/tensorRT_model.bin"})
                                       .c_str()),

        ProgramArguments::Option_t("threshold", "0.3"),
        ProgramArguments::Option_t("width", "1280"), //"960"),
        ProgramArguments::Option_t("height", "800"), //"576"),
        ProgramArguments::Option_t("maxDistance", "50.0"),
        ProgramArguments::Option_t("rig", (DataPath::get() + "/samples/laneDetection/rig.xml").c_str()),
    });
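
    // A typical invocation overrides these defaults on the command line, for example
    // (the binary name is illustrative; paths depend on your data set):
    //   ./sample_lane_object_freespace --video=<path>/camera.h264 \
    //       --tensorRT_model=<path>/tensorRT_model.bin \
    //       --rig=<path>/rig.xml --threshold=0.3 --maxDistance=50.0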

    CLaneObject laneObjectNet;
    if (!laneObjectNet.initSampleApp(argc, argv, &args, NULL, 1280, 800)){
        std::cerr << "Cannot initialize the sample application" << std::endl;
        return -1;
    }
    if (!laneObjectNet.initializeModules()){
        std::cerr << "Cannot initialize DW subsystems" << std::endl;
        laneObjectNet.m_run = false;
    }

    typedef std::chrono::high_resolution_clock myclock_t;
    typedef std::chrono::time_point<myclock_t> timepoint_t;
    timepoint_t lastUpdateTime = myclock_t::now();
    //---------------------------dnn------------------------//
    dwImageGL *frameGL = nullptr;
    dwStatus result;
    //---------------------------dnn------------------------//

    // main loop
    while (laneObjectNet.m_run && !laneObjectNet.m_window->shouldClose()) {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        std::this_thread::yield();

        // cap the loop at roughly 15 FPS (one iteration every 66 ms)
        std::chrono::milliseconds timeSinceUpdate = std::chrono::duration_cast<std::chrono::milliseconds>(myclock_t::now() - lastUpdateTime);
        if (timeSinceUpdate < std::chrono::milliseconds(66)){
            std::this_thread::sleep_for(std::chrono::microseconds(std::chrono::milliseconds(66) -
                                                                  timeSinceUpdate));
        }

        lastUpdateTime = myclock_t::now();

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        //---------------------------dnn------------------------//
        if (frameGL) {
            laneObjectNet.m_sensorIO->releaseGLRgbaFrame();
            laneObjectNet.m_sensorIO->releaseFrame();
            frameGL = nullptr;
        }
        result = laneObjectNet.m_sensorIO->getFrame();
        if (result == DW_END_OF_STREAM) {
            std::cout << "Camera reached end of stream" << std::endl;
            dwSensor_reset(laneObjectNet.m_cameraSensor);
            continue;
        } else if (result != DW_SUCCESS) {
            std::cerr << "Cannot read frame: " << dwGetStatusName(result) << std::endl;
            laneObjectNet.m_run = false;
            continue;
        }
        laneObjectNet.m_rgbaImage = laneObjectNet.m_sensorIO->getCudaRgba();
        laneObjectNet.m_yuvImage = laneObjectNet.m_sensorIO->getCudaYuv();
        frameGL = laneObjectNet.m_sensorIO->getGlRgbaFrame();
        if (frameGL) {
            dwRenderer_renderTexture(frameGL->tex, frameGL->target, laneObjectNet.m_renderer);
        }

        laneObjectNet.runDetector();
        laneObjectNet.runSingleCameraPipeline();

        //plot objects and lanes
        laneObjectNet.plotLaneAndObjects();

        laneObjectNet.m_sensorIO->releaseCudaYuv();
        laneObjectNet.m_sensorIO->releaseCudaRgba();
        laneObjectNet.m_window->swapBuffers();
        CHECK_GL_ERROR();
    }
    //---------------------------dnn------------------------//
    if (frameGL) {
        laneObjectNet.m_sensorIO->releaseGLRgbaFrame();
        laneObjectNet.m_sensorIO->releaseFrame();
    }
    //---------------------------dnn------------------------//
    // Release modules and driveworks.
    laneObjectNet.releaseModules();
    laneObjectNet.releaseSampleApp();

    return 0;
}
