// Vision Library - Files
// Vision.h
///
/// \file Vision.h
///
/// Vision library header file
///
/// Copyright (c) 2019 AnyConnect. All rights reserved.
///
///
#ifndef _VISION_H_
#define _VISION_H_
#include "Access.h"
#include <memory>
#include <vector>
#include <string>
#include <functional>
namespace com { /**< \namespace com */
namespace anyconnect { /**< \namespace anyconnect */
namespace vision { /**< \namespace vision */
///
/// \brief Vision return states.
///
/// Defines Vision success and failure states.
///
enum class VisionRet {
OK, ///< Successful invocation.
NOT_STARTED, ///< Vision library is not started.
INVALID_PARAMETERS, ///< Invalid parameters.
FAIL, ///< Generic failure.
};
///
/// \brief Media categories processed by the Vision library.
///
enum class MediaType {
AUDIO, ///< Audio stream data.
VIDEO, ///< Video frame data.
SENSOR ///< Sensor readings.
};
///
/// \brief Kinds of sensor payloads carried by SensorData.
///
enum class SensorType {
DIGITAL, ///< Single digital value (see DigitalData).
VECTOR_DATA, ///< 3-axis Euclidean vector value (see SensorVectorData).
MIXED ///< Combination of digital and vector data.
};
///
/// \brief Categories of notifications delivered through VisionEventHandler.
///
enum class VisionEventType {
EVENT, ///< An analytics detection event.
RECORD, ///< A recording-related notification.
DEVICE_STATUS ///< A device status change notification.
};
// Internal usage purpose only
///
/// \brief Shared FIFO ring buffer descriptor used to pass media data
/// between the Stream library (producer) and the Vision library (consumer).
///
typedef struct
{
int32_t bufferSize; ///< Number of bytes in FIFO. Power of 2.
int32_t writeIndex; ///< Index of next writable byte.
int32_t readIndex; ///< Index of next readable byte.
int32_t bigMask; ///< Used for wrapping indices with extra bit to distinguish full/empty.
int32_t smallMask; ///< Used for fitting indices to buffer.
int32_t start_frame; ///< Frame number of the start frame — presumably pairs with starttime; TODO confirm.
std::string imagerSource; ///< Used for setting the imager number.
struct timespec starttime; ///< Used to indicate the time of the start frame.
int width; ///< Frame width in pixels — assumed for video buffers; TODO confirm.
int height; ///< Frame height in pixels — assumed for video buffers; TODO confirm.
unsigned char *buffer; ///< Backing storage of bufferSize bytes. NOTE(review): ownership not shown here — verify who allocates/frees.
bool isMotionData; ///< Flags the buffer content as motion data — TODO confirm semantics with producer.
}RingBuffer;
///
/// \brief Euclidean vector data for sensor input/output.
///
/// Defines 3-dimensional Euclidean vector data.
/// This is used for 3-axis data of accelerometer, magnetometer and gyroscope.
///
typedef struct SensorVector
{
private:
float x; ///< X-axis component.
float y; ///< Y-axis component.
float z; ///< Z-axis component.
int id; ///< Sensor id this sample belongs to.
public:
void set_x(float x); ///< Set the X-axis component.
void set_y(float y); ///< Set the Y-axis component.
void set_z(float z); ///< Set the Z-axis component.
void set_id(int id); ///< Set the sensor id.
float get_x(); ///< Get the X-axis component.
float get_y(); ///< Get the Y-axis component.
float get_z(); ///< Get the Z-axis component.
int get_id(); ///< Get the sensor id.
bool absvalue; ///< When true, values are absolute magnitudes — TODO confirm with implementation.
int capture_interval; // in milliseconds
std::string UUID; ///< Unique identifier of the sensor.
bool IsEqual( struct SensorVector & r); ///< Compare with another vector; equality criteria defined in the implementation.
}SensorVectorData;
///
/// \brief DigitalData
///
/// Defines a single digital (on/off or byte-valued) sensor sample.
///
typedef struct
{
unsigned char data; ///< The digital sample value.
std::string name; ///< Human-readable sensor name.
int id; ///< Sensor id this sample belongs to.
int capture_interval; // in milliseconds
std::string UUID; ///< Unique identifier of the sensor.
}DigitalData;
///
/// \brief A single sensor sample passed to Vision::setSensorData().
///
/// The anonymous union is discriminated by \c stype: read \c digitalData
/// when stype == SensorType::DIGITAL, \c vectorData when
/// stype == SensorType::VECTOR_DATA.
/// NOTE(review): pointer ownership is not shown here — presumably the
/// caller retains ownership of the pointed-to data; verify with callers.
///
typedef struct
{
int id; // id of the sensor
SensorType stype; // type of the sensor
union
{
DigitalData *digitalData; ///< Valid when stype is DIGITAL.
SensorVectorData *vectorData; ///< Valid when stype is VECTOR_DATA.
};
struct timespec capturedtime; ///< Time at which the sample was captured.
}SensorData;
///
/// Handler invoked by the Vision library when an analytics event occurs.
///
/// \param[in] eventType - category of the notification (event / record / device status).
/// \param[in] eventSource - identifier of the source that produced the event.
/// \param[in] eventMessage - event description message.
/// \param[in] eventTime - time at which the event occurred.
/// \param[in] data - pointer to the event payload bytes. NOTE(review): ownership/lifetime not specified here — assume valid only for the duration of the call; confirm with implementation.
/// \param[in] size - number of bytes in \p data.
///
typedef std::function<void(VisionEventType eventType, std::string eventSource, std::string eventMessage, time_t eventTime, unsigned char* data, int size)> VisionEventHandler;
///
/// \brief Abstract interface to the Vision analytics library.
///
/// Implementations consume shared audio/video ring buffers and sensor data,
/// run the enabled analytics modules (video, audio, sensor, unsupervised
/// learning) and report detections through a registered VisionEventHandler.
///
class Vision {
  com::anyconnect::access::Access::Ptr access; ///< Access object used for platform/cloud services.
public:
  virtual ~Vision() {}
  //class builder;
  ///
  /// \brief Set Access object.
  ///
  /// Set the Access object the Vision library uses for platform services.
  ///
  /// \param[in] access - Access object.
  ///
  virtual void setAccess(com::anyconnect::access::Access::Ptr access) = 0;
  ///
  /// Start the Vision library.
  ///
  /// Resources are allocated and the library waits in idle state for a
  /// request to come in, or to send a request issued by the application.
  ///
  /// \return VisionRet::OK on successful vision initialization, other values on failure.
  ///
  virtual VisionRet start() = 0;
  ///
  /// Stop the Vision library.
  ///
  /// Graceful shutdown of recording and any other running cloud service;
  /// gracefully stops media processing, closes active visions and frees
  /// resources.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet stop() = 0;
  ///
  /// Set the shared audio buffer from which audio data will be read.
  /// The Stream library provides the buffer and it needs to be set here.
  ///
  /// \param[in] audioBuffer - reference to the shared audio ring buffer.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet setSharedAudioBuffer(RingBuffer &audioBuffer) = 0;
  ///
  /// Set the shared video buffer from which video data will be read.
  /// The Stream library provides the buffer and it needs to be set here.
  ///
  /// \param[in] videoBuffer - reference to the shared video ring buffer.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet setSharedVideoBuffer(RingBuffer &videoBuffer) = 0;
  ///
  /// Supply external sensor data for the given sensor id when the Vision
  /// library does not have direct access to the sensor.
  ///
  /// \param[in] id - the id of the sensor.
  /// \param[in] sensorData - digital or vector data of the sensor.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet setSensorData(int id, SensorData sensorData) = 0;
  ///
  /// Set the file path for deep-learning and vision usage. The Vision
  /// library will store deep-learning files on the provided path. By
  /// default they are stored in the current binary directory.
  ///
  /// NOTE(review): "Learing" is a typo for "Learning"; the name is kept
  /// unchanged for source/ABI compatibility with existing callers.
  ///
  /// \param[in] filePath - the path of the deep-learning model file and label file directory.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet addDeepLearingFilePath(std::string filePath) = 0;
  ///
  /// Set the threshold for the vision event (sensor, audio, video).
  ///
  /// \param[in] id - the id of the model.
  /// \param[in] media - Audio, video or sensor.
  /// \param[in] threshold - threshold value in the range (0-100).
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet setThreshold(int id, MediaType media, double threshold) = 0;
  ///
  /// Enable or disable VideoAnalytic.
  ///
  /// \param[in] enableVideoAnalytic - "true" will enable VideoAnalytic, "false" will disable it.
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet enableVideoAnalytic(bool enableVideoAnalytic) = 0;
  ///
  /// Get the status of VideoAnalytic.
  ///
  /// \return "true" on enabled, "false" on disabled.
  ///
  virtual bool getVideoAnalyticStatus(void) = 0;
  ///
  /// Enable or disable AudioAnalytic.
  ///
  /// \param[in] enableAudioAnalytic - "true" will enable Audio Analysis, "false" will disable it.
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet enableAudioAnalytic(bool enableAudioAnalytic) = 0;
  ///
  /// Get the status of AudioAnalytic.
  ///
  /// \return "true" on enabled, "false" on disabled.
  ///
  virtual bool getAudioAnalyticStatus(void) = 0;
  ///
  /// Enable or disable the sensor module.
  ///
  /// \param[in] enableSensor - "true" will enable all the sensors, "false" will disable them.
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet enableSensorModule(bool enableSensor) = 0;
  ///
  /// Get the status of the sensor module.
  ///
  /// \return "true" on enabled, "false" on disabled.
  ///
  virtual bool getSensorModuleStatus(void) = 0;
  ///
  /// Enable or disable UnsupervisedLearning.
  ///
  /// \param[in] enableUnsupervisedLearning - "true" will enable unsupervised learning, "false" will disable it.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet enableUnsupervisedLearning(bool enableUnsupervisedLearning) = 0;
  ///
  /// Get the status of UnsupervisedLearning.
  ///
  /// \return "true" on enabled, "false" on disabled.
  ///
  virtual bool getUnsupervisedLearningStatus(void) = 0;
  ///
  /// Set the Vision event listener used to deliver events to the cloud.
  /// The handler is invoked twice for every detected event:
  /// first as soon as the event is found, and a second time, with the
  /// analysis result, when event analysis is done.
  ///
  /// \param[in] handler - callback invoked on each event.
  ///
  /// \return VisionRet::OK on success, other values on failure.
  ///
  virtual VisionRet setVisionEventListener(VisionEventHandler handler) = 0;
  ///
  /// Set the endpoint ID and owner user ID for mapping.
  ///
  /// \param[in] ownerID - the owner user ID.
  /// \param[in] endpointID - the endpoint ID.
  ///
  virtual void setEndpointAndUserID(uint64_t ownerID, uint64_t endpointID) = 0;
  ///
  /// Send a frame to the Vision microservice.
  ///
  /// \param[in] buf - pointer to the frame bytes. NOTE(review): ownership/lifetime not specified — confirm with implementation.
  /// \param[in] size - number of bytes in \p buf.
  /// \param[in] timestamp - capture time of the frame; 0 means unspecified.
  ///
  virtual void sendFrame(unsigned char *buf, int size, time_t timestamp=0) = 0;
  ///
  /// Shared pointer to a Vision class object for camera app usage.
  ///
  typedef std::shared_ptr<Vision> Ptr;
};
} //vision
} //anyconnect
} //com
#endif //_VISION_H_
// Updated over 2 years ago