init - Initialize project
802
modules/gapi/misc/python/pyopencv_gapi.hpp
Normal file
@@ -0,0 +1,802 @@
#ifndef OPENCV_GAPI_PYOPENCV_GAPI_HPP
#define OPENCV_GAPI_PYOPENCV_GAPI_HPP

#ifdef HAVE_OPENCV_GAPI

#ifdef _MSC_VER
#pragma warning(disable: 4503)  // "decorated name length exceeded"
                                // on empty_meta(const cv::GMetaArgs&, const cv::GArgs&)
#endif

#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/python/python.hpp>

// NB: Python wrapper replaces :: with _ for classes
using gapi_GKernelPackage = cv::gapi::GKernelPackage;
using gapi_GNetPackage = cv::gapi::GNetPackage;
using gapi_ie_PyParams = cv::gapi::ie::PyParams;
using gapi_wip_IStreamSource_Ptr = cv::Ptr<cv::gapi::wip::IStreamSource>;
using detail_ExtractArgsCallback = cv::detail::ExtractArgsCallback;
using detail_ExtractMetaCallback = cv::detail::ExtractMetaCallback;

// NB: Python wrapper generate T_U for T<U>
// This behavior is only observed for inputs
using GOpaque_bool = cv::GOpaque<bool>;
using GOpaque_int = cv::GOpaque<int>;
using GOpaque_double = cv::GOpaque<double>;
using GOpaque_float = cv::GOpaque<double>;
using GOpaque_string = cv::GOpaque<std::string>;
using GOpaque_Point2i = cv::GOpaque<cv::Point>;
using GOpaque_Point2f = cv::GOpaque<cv::Point2f>;
using GOpaque_Size = cv::GOpaque<cv::Size>;
using GOpaque_Rect = cv::GOpaque<cv::Rect>;

using GArray_bool = cv::GArray<bool>;
using GArray_int = cv::GArray<int>;
using GArray_double = cv::GArray<double>;
using GArray_float = cv::GArray<double>;
using GArray_string = cv::GArray<std::string>;
using GArray_Point2i = cv::GArray<cv::Point>;
using GArray_Point2f = cv::GArray<cv::Point2f>;
using GArray_Size = cv::GArray<cv::Size>;
using GArray_Rect = cv::GArray<cv::Rect>;
using GArray_Scalar = cv::GArray<cv::Scalar>;
using GArray_Mat = cv::GArray<cv::Mat>;
using GArray_GMat = cv::GArray<cv::GMat>;

// FIXME: Python wrapper generate code without namespace std,
// so it cause error: "string wasn't declared"
// WA: Create using
using std::string;

template <>
bool pyopencv_to(PyObject* obj, std::vector<GCompileArg>& value, const ArgInfo& info)
{
    return pyopencv_to_generic_vec(obj, value, info);
}

template <>
PyObject* pyopencv_from(const std::vector<GCompileArg>& value)
{
    return pyopencv_from_generic_vec(value);
}

template <>
bool pyopencv_to(PyObject* obj, GRunArgs& value, const ArgInfo& info)
{
    return pyopencv_to_generic_vec(obj, value, info);
}

template<>
PyObject* pyopencv_from(const cv::detail::OpaqueRef& o)
{
    switch (o.getKind())
    {
        case cv::detail::OpaqueKind::CV_BOOL      : return pyopencv_from(o.rref<bool>());
        case cv::detail::OpaqueKind::CV_INT       : return pyopencv_from(o.rref<int>());
        case cv::detail::OpaqueKind::CV_DOUBLE    : return pyopencv_from(o.rref<double>());
        case cv::detail::OpaqueKind::CV_FLOAT     : return pyopencv_from(o.rref<float>());
        case cv::detail::OpaqueKind::CV_STRING    : return pyopencv_from(o.rref<std::string>());
        case cv::detail::OpaqueKind::CV_POINT     : return pyopencv_from(o.rref<cv::Point>());
        case cv::detail::OpaqueKind::CV_POINT2F   : return pyopencv_from(o.rref<cv::Point2f>());
        case cv::detail::OpaqueKind::CV_SIZE      : return pyopencv_from(o.rref<cv::Size>());
        case cv::detail::OpaqueKind::CV_RECT      : return pyopencv_from(o.rref<cv::Rect>());
        case cv::detail::OpaqueKind::CV_UNKNOWN   : break;
        case cv::detail::OpaqueKind::CV_UINT64    : break;
        case cv::detail::OpaqueKind::CV_SCALAR    : break;
        case cv::detail::OpaqueKind::CV_MAT       : break;
        case cv::detail::OpaqueKind::CV_DRAW_PRIM : break;
    }

    PyErr_SetString(PyExc_TypeError, "Unsupported GOpaque type");
    return NULL;
};

template <>
PyObject* pyopencv_from(const cv::detail::VectorRef& v)
{
    switch (v.getKind())
    {
        case cv::detail::OpaqueKind::CV_BOOL      : return pyopencv_from_generic_vec(v.rref<bool>());
        case cv::detail::OpaqueKind::CV_INT       : return pyopencv_from_generic_vec(v.rref<int>());
        case cv::detail::OpaqueKind::CV_DOUBLE    : return pyopencv_from_generic_vec(v.rref<double>());
        case cv::detail::OpaqueKind::CV_FLOAT     : return pyopencv_from_generic_vec(v.rref<float>());
        case cv::detail::OpaqueKind::CV_STRING    : return pyopencv_from_generic_vec(v.rref<std::string>());
        case cv::detail::OpaqueKind::CV_POINT     : return pyopencv_from_generic_vec(v.rref<cv::Point>());
        case cv::detail::OpaqueKind::CV_POINT2F   : return pyopencv_from_generic_vec(v.rref<cv::Point2f>());
        case cv::detail::OpaqueKind::CV_SIZE      : return pyopencv_from_generic_vec(v.rref<cv::Size>());
        case cv::detail::OpaqueKind::CV_RECT      : return pyopencv_from_generic_vec(v.rref<cv::Rect>());
        case cv::detail::OpaqueKind::CV_SCALAR    : return pyopencv_from_generic_vec(v.rref<cv::Scalar>());
        case cv::detail::OpaqueKind::CV_MAT       : return pyopencv_from_generic_vec(v.rref<cv::Mat>());
        case cv::detail::OpaqueKind::CV_UNKNOWN   : break;
        case cv::detail::OpaqueKind::CV_UINT64    : break;
        case cv::detail::OpaqueKind::CV_DRAW_PRIM : break;
    }

    PyErr_SetString(PyExc_TypeError, "Unsupported GArray type");
    return NULL;
}

template <>
PyObject* pyopencv_from(const GRunArg& v)
{
    switch (v.index())
    {
        case GRunArg::index_of<cv::Mat>():
            return pyopencv_from(util::get<cv::Mat>(v));

        case GRunArg::index_of<cv::Scalar>():
            return pyopencv_from(util::get<cv::Scalar>(v));

        case GRunArg::index_of<cv::detail::VectorRef>():
            return pyopencv_from(util::get<cv::detail::VectorRef>(v));

        case GRunArg::index_of<cv::detail::OpaqueRef>():
            return pyopencv_from(util::get<cv::detail::OpaqueRef>(v));
    }

    PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
    return NULL;
}

template<>
PyObject* pyopencv_from(const GRunArgs& value)
{
    size_t i, n = value.size();

    // NB: It doesn't make sense to return list with a single element
    if (n == 1)
    {
        PyObject* item = pyopencv_from(value[0]);
        if(!item)
        {
            return NULL;
        }
        return item;
    }

    PyObject* list = PyList_New(n);
    for(i = 0; i < n; ++i)
    {
        PyObject* item = pyopencv_from(value[i]);
        if(!item)
        {
            Py_DECREF(list);
            PyErr_SetString(PyExc_TypeError, "Failed to unpack GRunArgs");
            return NULL;
        }
        PyList_SetItem(list, i, item);
    }

    return list;
}

template<>
bool pyopencv_to(PyObject* obj, GMetaArgs& value, const ArgInfo& info)
{
    return pyopencv_to_generic_vec(obj, value, info);
}

template<>
PyObject* pyopencv_from(const GMetaArgs& value)
{
    return pyopencv_from_generic_vec(value);
}

template <typename T>
void pyopencv_to_with_check(PyObject* from, T& to, const std::string& msg = "")
{
    if (!pyopencv_to(from, to, ArgInfo("", false)))
    {
        cv::util::throw_error(std::logic_error(msg));
    }
}

template <typename T>
void pyopencv_to_generic_vec_with_check(PyObject* from,
                                        std::vector<T>& to,
                                        const std::string& msg = "")
{
    if (!pyopencv_to_generic_vec(from, to, ArgInfo("", false)))
    {
        cv::util::throw_error(std::logic_error(msg));
    }
}

template <typename T>
static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw)
{
    using namespace cv;

    GProtoArgs args;
    Py_ssize_t size = PyTuple_Size(py_args);
    args.reserve(size);
    for (int i = 0; i < size; ++i)
    {
        PyObject* item = PyTuple_GetItem(py_args, i);
        if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GScalar_TypePtr)))
        {
            args.emplace_back(reinterpret_cast<pyopencv_GScalar_t*>(item)->v);
        }
        else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GMat_TypePtr)))
        {
            args.emplace_back(reinterpret_cast<pyopencv_GMat_t*>(item)->v);
        }
        else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GOpaqueT_TypePtr)))
        {
            args.emplace_back(reinterpret_cast<pyopencv_GOpaqueT_t*>(item)->v.strip());
        }
        else if (PyObject_TypeCheck(item, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayT_TypePtr)))
        {
            args.emplace_back(reinterpret_cast<pyopencv_GArrayT_t*>(item)->v.strip());
        }
        else
        {
            PyErr_SetString(PyExc_TypeError, "Unsupported type for cv.GIn()/cv.GOut()");
            return NULL;
        }
    }

    return pyopencv_from<T>(T{std::move(args)});
}

static PyObject* pyopencv_cv_GIn(PyObject* , PyObject* py_args, PyObject* kw)
{
    return extract_proto_args<GProtoInputArgs>(py_args, kw);
}

static PyObject* pyopencv_cv_GOut(PyObject* , PyObject* py_args, PyObject* kw)
{
    return extract_proto_args<GProtoOutputArgs>(py_args, kw);
}

static cv::detail::OpaqueRef extract_opaque_ref(PyObject* from, cv::detail::OpaqueKind kind)
{
#define HANDLE_CASE(T, O) case cv::detail::OpaqueKind::CV_##T: \
{ \
    O obj{}; \
    pyopencv_to_with_check(from, obj, "Failed to obtain " # O); \
    return cv::detail::OpaqueRef{std::move(obj)}; \
}
#define UNSUPPORTED(T) case cv::detail::OpaqueKind::CV_##T: break
    switch (kind)
    {
        HANDLE_CASE(BOOL, bool);
        HANDLE_CASE(INT, int);
        HANDLE_CASE(DOUBLE, double);
        HANDLE_CASE(FLOAT, float);
        HANDLE_CASE(STRING, std::string);
        HANDLE_CASE(POINT, cv::Point);
        HANDLE_CASE(POINT2F, cv::Point2f);
        HANDLE_CASE(SIZE, cv::Size);
        HANDLE_CASE(RECT, cv::Rect);
        UNSUPPORTED(UNKNOWN);
        UNSUPPORTED(UINT64);
        UNSUPPORTED(SCALAR);
        UNSUPPORTED(MAT);
        UNSUPPORTED(DRAW_PRIM);
#undef HANDLE_CASE
#undef UNSUPPORTED
    }
    util::throw_error(std::logic_error("Unsupported type for GOpaqueT"));
}

static cv::detail::VectorRef extract_vector_ref(PyObject* from, cv::detail::OpaqueKind kind)
{
#define HANDLE_CASE(T, O) case cv::detail::OpaqueKind::CV_##T: \
{ \
    std::vector<O> obj; \
    pyopencv_to_generic_vec_with_check(from, obj, "Failed to obtain vector of " # O); \
    return cv::detail::VectorRef{std::move(obj)}; \
}
#define UNSUPPORTED(T) case cv::detail::OpaqueKind::CV_##T: break
    switch (kind)
    {
        HANDLE_CASE(BOOL, bool);
        HANDLE_CASE(INT, int);
        HANDLE_CASE(DOUBLE, double);
        HANDLE_CASE(FLOAT, float);
        HANDLE_CASE(STRING, std::string);
        HANDLE_CASE(POINT, cv::Point);
        HANDLE_CASE(POINT2F, cv::Point2f);
        HANDLE_CASE(SIZE, cv::Size);
        HANDLE_CASE(RECT, cv::Rect);
        HANDLE_CASE(SCALAR, cv::Scalar);
        HANDLE_CASE(MAT, cv::Mat);
        UNSUPPORTED(UNKNOWN);
        UNSUPPORTED(UINT64);
        UNSUPPORTED(DRAW_PRIM);
#undef HANDLE_CASE
#undef UNSUPPORTED
    }
    util::throw_error(std::logic_error("Unsupported type for GArrayT"));
}

static cv::GRunArg extract_run_arg(const cv::GTypeInfo& info, PyObject* item)
{
    switch (info.shape)
    {
        case cv::GShape::GMAT:
        {
            // NB: In case streaming it can be IStreamSource or cv::Mat
            if (PyObject_TypeCheck(item,
                                   reinterpret_cast<PyTypeObject*>(pyopencv_gapi_wip_IStreamSource_TypePtr)))
            {
                cv::gapi::wip::IStreamSource::Ptr source =
                    reinterpret_cast<pyopencv_gapi_wip_IStreamSource_t*>(item)->v;
                return source;
            }
            cv::Mat obj;
            pyopencv_to_with_check(item, obj, "Failed to obtain cv::Mat");
            return obj;
        }
        case cv::GShape::GSCALAR:
        {
            cv::Scalar obj;
            pyopencv_to_with_check(item, obj, "Failed to obtain cv::Scalar");
            return obj;
        }
        case cv::GShape::GOPAQUE:
        {
            return extract_opaque_ref(item, info.kind);
        }
        case cv::GShape::GARRAY:
        {
            return extract_vector_ref(item, info.kind);
        }
        case cv::GShape::GFRAME:
        {
            // NB: Isn't supported yet.
            break;
        }
    }

    util::throw_error(std::logic_error("Unsupported output shape"));
}

static cv::GRunArgs extract_run_args(const cv::GTypesInfo& info, PyObject* py_args)
{
    cv::GRunArgs args;
    Py_ssize_t tuple_size = PyTuple_Size(py_args);
    args.reserve(tuple_size);

    for (int i = 0; i < tuple_size; ++i)
    {
        args.push_back(extract_run_arg(info[i], PyTuple_GetItem(py_args, i)));
    }

    return args;
}

static cv::GMetaArg extract_meta_arg(const cv::GTypeInfo& info, PyObject* item)
{
    switch (info.shape)
    {
        case cv::GShape::GMAT:
        {
            cv::Mat obj;
            pyopencv_to_with_check(item, obj, "Failed to obtain cv::Mat");
            return cv::GMetaArg{cv::descr_of(obj)};
        }
        case cv::GShape::GSCALAR:
        {
            cv::Scalar obj;
            pyopencv_to_with_check(item, obj, "Failed to obtain cv::Scalar");
            return cv::GMetaArg{cv::descr_of(obj)};
        }
        case cv::GShape::GARRAY:
        {
            return cv::GMetaArg{cv::empty_array_desc()};
        }
        case cv::GShape::GOPAQUE:
        {
            return cv::GMetaArg{cv::empty_gopaque_desc()};
        }
        case cv::GShape::GFRAME:
        {
            // NB: Isn't supported yet.
            break;
        }
    }
    util::throw_error(std::logic_error("Unsupported output shape"));
}

static cv::GMetaArgs extract_meta_args(const cv::GTypesInfo& info, PyObject* py_args)
{
    cv::GMetaArgs metas;
    Py_ssize_t tuple_size = PyTuple_Size(py_args);
    metas.reserve(tuple_size);

    for (int i = 0; i < tuple_size; ++i)
    {
        metas.push_back(extract_meta_arg(info[i], PyTuple_GetItem(py_args, i)));
    }

    return metas;
}

inline PyObject* extract_opaque_value(const cv::GArg& value)
{
    GAPI_Assert(value.kind != cv::detail::ArgKind::GOBJREF);
#define HANDLE_CASE(T, O) case cv::detail::OpaqueKind::CV_##T: \
{ \
    return pyopencv_from(value.get<O>()); \
}

#define UNSUPPORTED(T) case cv::detail::OpaqueKind::CV_##T: break
    switch (value.opaque_kind)
    {
        HANDLE_CASE(BOOL, bool);
        HANDLE_CASE(INT, int);
        HANDLE_CASE(DOUBLE, double);
        HANDLE_CASE(FLOAT, float);
        HANDLE_CASE(STRING, std::string);
        HANDLE_CASE(POINT, cv::Point);
        HANDLE_CASE(POINT2F, cv::Point2f);
        HANDLE_CASE(SIZE, cv::Size);
        HANDLE_CASE(RECT, cv::Rect);
        HANDLE_CASE(SCALAR, cv::Scalar);
        HANDLE_CASE(MAT, cv::Mat);
        UNSUPPORTED(UNKNOWN);
        UNSUPPORTED(UINT64);
        UNSUPPORTED(DRAW_PRIM);
#undef HANDLE_CASE
#undef UNSUPPORTED
    }
    util::throw_error(std::logic_error("Unsupported kernel input type"));
}

static cv::GRunArgs run_py_kernel(PyObject* kernel,
                                  const cv::gapi::python::GPythonContext &ctx)
{
    const auto& ins = ctx.ins;
    const auto& in_metas = ctx.in_metas;
    const auto& out_info = ctx.out_info;

    PyGILState_STATE gstate;
    gstate = PyGILState_Ensure();

    cv::GRunArgs outs;
    try
    {
        int in_idx = 0;
        PyObject* args = PyTuple_New(ins.size());
        for (size_t i = 0; i < ins.size(); ++i)
        {
            // NB: If meta is monostate then object isn't associated with G-TYPE, so in case it
            // kind matches with supported types do conversion from c++ to python, if not (CV_UNKNOWN)
            // obtain PyObject* and pass as-is.
            if (cv::util::holds_alternative<cv::util::monostate>(in_metas[i]))
            {
                PyTuple_SetItem(args, i,
                    ins[i].opaque_kind != cv::detail::OpaqueKind::CV_UNKNOWN ? extract_opaque_value(ins[i])
                                                                             : ins[i].get<PyObject*>());
                continue;
            }

            switch (in_metas[i].index())
            {
                case cv::GMetaArg::index_of<cv::GMatDesc>():
                    PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::Mat>()));
                    break;
                case cv::GMetaArg::index_of<cv::GScalarDesc>():
                    PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::Scalar>()));
                    break;
                case cv::GMetaArg::index_of<cv::GOpaqueDesc>():
                    PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::detail::OpaqueRef>()));
                    break;
                case cv::GMetaArg::index_of<cv::GArrayDesc>():
                    PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::detail::VectorRef>()));
                    break;
                case cv::GMetaArg::index_of<cv::GFrameDesc>():
                    util::throw_error(std::logic_error("GFrame isn't supported for custom operation"));
                    break;
            }
            ++in_idx;
        }

        PyObject* result = PyObject_CallObject(kernel, args);

        outs = out_info.size() == 1 ? cv::GRunArgs{extract_run_arg(out_info[0], result)}
                                    : extract_run_args(out_info, result);
    }
    catch (...)
    {
        PyGILState_Release(gstate);
        throw;
    }
    PyGILState_Release(gstate);

    return outs;
}

// FIXME: Now it's impossible to obtain meta function from operation,
// because kernel connects to operation only by id (string).
static cv::GMetaArgs empty_meta(const cv::GMetaArgs &, const cv::GArgs &) {
    return {};
}

static GMetaArg get_meta_arg(PyObject* obj)
{
    if (PyObject_TypeCheck(obj,
                           reinterpret_cast<PyTypeObject*>(pyopencv_GMatDesc_TypePtr)))
    {
        return cv::GMetaArg{reinterpret_cast<pyopencv_GMatDesc_t*>(obj)->v};
    }
    else if (PyObject_TypeCheck(obj,
                                reinterpret_cast<PyTypeObject*>(pyopencv_GScalarDesc_TypePtr)))
    {
        return cv::GMetaArg{reinterpret_cast<pyopencv_GScalarDesc_t*>(obj)->v};
    }
    else if (PyObject_TypeCheck(obj,
                                reinterpret_cast<PyTypeObject*>(pyopencv_GArrayDesc_TypePtr)))
    {
        return cv::GMetaArg{reinterpret_cast<pyopencv_GArrayDesc_t*>(obj)->v};
    }
    else if (PyObject_TypeCheck(obj,
                                reinterpret_cast<PyTypeObject*>(pyopencv_GOpaqueDesc_TypePtr)))
    {
        return cv::GMetaArg{reinterpret_cast<pyopencv_GOpaqueDesc_t*>(obj)->v};
    }
    else
    {
        util::throw_error(std::logic_error("Unsupported output meta type"));
    }
}

static cv::GMetaArgs get_meta_args(PyObject* tuple)
{
    size_t size = PyTuple_Size(tuple);

    cv::GMetaArgs metas;
    metas.reserve(size);
    for (size_t i = 0; i < size; ++i)
    {
        metas.push_back(get_meta_arg(PyTuple_GetItem(tuple, i)));
    }

    return metas;
}

static GMetaArgs python_meta(PyObject* outMeta, const cv::GMetaArgs &meta, const cv::GArgs &gargs) {
    PyGILState_STATE gstate;
    gstate = PyGILState_Ensure();

    cv::GMetaArgs out_metas;
    try
    {
        PyObject* args = PyTuple_New(meta.size());
        size_t idx = 0;
        for (auto&& m : meta)
        {
            switch (m.index())
            {
                case cv::GMetaArg::index_of<cv::GMatDesc>():
                    PyTuple_SetItem(args, idx, pyopencv_from(cv::util::get<cv::GMatDesc>(m)));
                    break;
                case cv::GMetaArg::index_of<cv::GScalarDesc>():
                    PyTuple_SetItem(args, idx, pyopencv_from(cv::util::get<cv::GScalarDesc>(m)));
                    break;
                case cv::GMetaArg::index_of<cv::GArrayDesc>():
                    PyTuple_SetItem(args, idx, pyopencv_from(cv::util::get<cv::GArrayDesc>(m)));
                    break;
                case cv::GMetaArg::index_of<cv::GOpaqueDesc>():
                    PyTuple_SetItem(args, idx, pyopencv_from(cv::util::get<cv::GOpaqueDesc>(m)));
                    break;
                case cv::GMetaArg::index_of<cv::util::monostate>():
                    PyTuple_SetItem(args, idx, gargs[idx].get<PyObject*>());
                    break;
                case cv::GMetaArg::index_of<cv::GFrameDesc>():
                    util::throw_error(std::logic_error("GFrame isn't supported for custom operation"));
                    break;
            }
            ++idx;
        }
        PyObject* result = PyObject_CallObject(outMeta, args);
        out_metas = PyTuple_Check(result) ? get_meta_args(result)
                                          : cv::GMetaArgs{get_meta_arg(result)};
    }
    catch (...)
    {
        PyGILState_Release(gstate);
        throw;
    }
    PyGILState_Release(gstate);

    return out_metas;
}

static PyObject* pyopencv_cv_gapi_kernels(PyObject* , PyObject* py_args, PyObject*)
{
    using namespace cv;
    gapi::GKernelPackage pkg;
    Py_ssize_t size = PyTuple_Size(py_args);
    for (int i = 0; i < size; ++i)
    {
        PyObject* pair = PyTuple_GetItem(py_args, i);
        PyObject* kernel = PyTuple_GetItem(pair, 0);

        std::string id;
        if (!pyopencv_to(PyTuple_GetItem(pair, 1), id, ArgInfo("id", false)))
        {
            PyErr_SetString(PyExc_TypeError, "Failed to obtain: kernel id must be a string");
            return NULL;
        }
        Py_INCREF(kernel);
        gapi::python::GPythonFunctor f(id.c_str(),
                                       empty_meta,
                                       std::bind(run_py_kernel,
                                                 kernel,
                                                 std::placeholders::_1));
        pkg.include(f);
    }
    return pyopencv_from(pkg);
}

static PyObject* pyopencv_cv_gapi_op(PyObject* , PyObject* py_args, PyObject*)
{
    using namespace cv;
    Py_ssize_t size = PyTuple_Size(py_args);
    std::string id;
    if (!pyopencv_to(PyTuple_GetItem(py_args, 0), id, ArgInfo("id", false)))
    {
        PyErr_SetString(PyExc_TypeError, "Failed to obtain: operation id must be a string");
        return NULL;
    }
    PyObject* outMeta = PyTuple_GetItem(py_args, 1);
    Py_INCREF(outMeta);

    cv::GArgs args;
    for (int i = 2; i < size; i++)
    {
        PyObject* item = PyTuple_GetItem(py_args, i);
        if (PyObject_TypeCheck(item,
                               reinterpret_cast<PyTypeObject*>(pyopencv_GMat_TypePtr)))
        {
            args.emplace_back(reinterpret_cast<pyopencv_GMat_t*>(item)->v);
        }
        else if (PyObject_TypeCheck(item,
                                    reinterpret_cast<PyTypeObject*>(pyopencv_GScalar_TypePtr)))
        {
            args.emplace_back(reinterpret_cast<pyopencv_GScalar_t*>(item)->v);
        }
        else if (PyObject_TypeCheck(item,
                                    reinterpret_cast<PyTypeObject*>(pyopencv_GOpaqueT_TypePtr)))
        {
            auto&& arg = reinterpret_cast<pyopencv_GOpaqueT_t*>(item)->v.arg();
#define HC(T, K) case cv::GOpaqueT::Storage:: index_of<cv::GOpaque<T>>(): \
    args.emplace_back(cv::util::get<cv::GOpaque<T>>(arg));                \
    break;                                                                \

            SWITCH(arg.index(), GOPAQUE_TYPE_LIST_G, HC)
#undef HC
        }
        else if (PyObject_TypeCheck(item,
                                    reinterpret_cast<PyTypeObject*>(pyopencv_GArrayT_TypePtr)))
        {
            auto&& arg = reinterpret_cast<pyopencv_GArrayT_t*>(item)->v.arg();
#define HC(T, K) case cv::GArrayT::Storage:: index_of<cv::GArray<T>>(): \
    args.emplace_back(cv::util::get<cv::GArray<T>>(arg));               \
    break;                                                              \

            SWITCH(arg.index(), GARRAY_TYPE_LIST_G, HC)
#undef HC
        }
        else
        {
            Py_INCREF(item);
            args.emplace_back(cv::GArg(item));
        }
    }

    cv::GKernel::M outMetaWrapper = std::bind(python_meta,
                                              outMeta,
                                              std::placeholders::_1,
                                              std::placeholders::_2);
    return pyopencv_from(cv::gapi::wip::op(id, outMetaWrapper, std::move(args)));
}

static PyObject* pyopencv_cv_gin(PyObject*, PyObject* py_args, PyObject*)
{
    Py_INCREF(py_args);
    auto callback = cv::detail::ExtractArgsCallback{[=](const cv::GTypesInfo& info)
    {
        PyGILState_STATE gstate;
        gstate = PyGILState_Ensure();

        cv::GRunArgs args;
        try
        {
            args = extract_run_args(info, py_args);
        }
        catch (...)
        {
            PyGILState_Release(gstate);
            throw;
        }
        PyGILState_Release(gstate);
        return args;
    }};

    return pyopencv_from(callback);
}

static PyObject* pyopencv_cv_descr_of(PyObject*, PyObject* py_args, PyObject*)
{
    Py_INCREF(py_args);
    auto callback = cv::detail::ExtractMetaCallback{[=](const cv::GTypesInfo& info)
    {
        PyGILState_STATE gstate;
        gstate = PyGILState_Ensure();

        cv::GMetaArgs args;
        try
        {
            args = extract_meta_args(info, py_args);
        }
        catch (...)
        {
            PyGILState_Release(gstate);
            throw;
        }
        PyGILState_Release(gstate);
        return args;
    }};
    return pyopencv_from(callback);
}

template<typename T>
struct PyOpenCV_Converter<cv::GArray<T>>
{
    static PyObject* from(const cv::GArray<T>& p)
    {
        return pyopencv_from(cv::GArrayT(p));
    }
    static bool to(PyObject *obj, cv::GArray<T>& value, const ArgInfo& info)
    {
        if (PyObject_TypeCheck(obj, reinterpret_cast<PyTypeObject*>(pyopencv_GArrayT_TypePtr)))
        {
            auto& array = reinterpret_cast<pyopencv_GArrayT_t*>(obj)->v;
            try {
                value = cv::util::get<cv::GArray<T>>(array.arg());
            } catch (...) {
                return false;
            }
            return true;
        }
        return false;
    }
};

template<typename T>
struct PyOpenCV_Converter<cv::GOpaque<T>>
{
    static PyObject* from(const cv::GOpaque<T>& p)
    {
        return pyopencv_from(cv::GOpaqueT(p));
    }
    static bool to(PyObject *obj, cv::GOpaque<T>& value, const ArgInfo& info)
    {
        if (PyObject_TypeCheck(obj, reinterpret_cast<PyTypeObject*>(pyopencv_GOpaqueT_TypePtr)))
        {
            auto& opaque = reinterpret_cast<pyopencv_GOpaqueT_t*>(obj)->v;
            try {
                value = cv::util::get<cv::GOpaque<T>>(opaque.arg());
            } catch (...) {
                return false;
            }
            return true;
        }
        return false;
    }
};


// extend cv.gapi.wip. methods
#define PYOPENCV_EXTRA_METHODS_GAPI_WIP \
  {"kernels", CV_PY_FN_WITH_KW(pyopencv_cv_gapi_kernels), "kernels(...) -> GKernelPackage"}, \
  {"op", CV_PY_FN_WITH_KW_(pyopencv_cv_gapi_op, 0), "kernels(...) -> retval\n"}, \


#endif // HAVE_OPENCV_GAPI
#endif // OPENCV_GAPI_PYOPENCV_GAPI_HPP
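For context, the GIn/GOut/gin/descr_of entry points above back the Python-level graph construction API. A minimal usage sketch, mirroring the tests added later in this commit (the CPU kernel package is just one example of a compile argument):

    import numpy as np
    import cv2 as cv

    # Build a G-API graph: two GMat inputs, one GMat output.
    g_in1 = cv.GMat()
    g_in2 = cv.GMat()
    g_out = cv.gapi.add(g_in1, g_in2)
    comp  = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))

    # cv.gin() packs the runtime inputs; cv.compile_args() packs compile
    # arguments such as a kernel package.
    in1 = np.full((720, 1280), 100, dtype=np.uint8)
    in2 = np.full((720, 1280), 50,  dtype=np.uint8)
    out = comp.apply(cv.gin(in1, in2), args=cv.compile_args(cv.gapi.core.cpu.kernels()))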
327
modules/gapi/misc/python/python_bridge.hpp
Normal file
@@ -0,0 +1,327 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2021 Intel Corporation

#ifndef OPENCV_GAPI_PYTHON_BRIDGE_HPP
#define OPENCV_GAPI_PYTHON_BRIDGE_HPP

#include <opencv2/gapi.hpp>
#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/gopaque.hpp>

#define ID(T, E) T
#define ID_(T, E) ID(T, E),

#define WRAP_ARGS(T, E, G) \
    G(T, E)

#define SWITCH(type, LIST_G, HC) \
    switch(type) { \
        LIST_G(HC, HC) \
        default: \
            GAPI_Assert(false && "Unsupported type"); \
    }

#define GARRAY_TYPE_LIST_G(G, G2) \
WRAP_ARGS(bool        , cv::gapi::ArgType::CV_BOOL,    G)  \
WRAP_ARGS(int         , cv::gapi::ArgType::CV_INT,     G)  \
WRAP_ARGS(double      , cv::gapi::ArgType::CV_DOUBLE,  G)  \
WRAP_ARGS(float       , cv::gapi::ArgType::CV_FLOAT,   G)  \
WRAP_ARGS(std::string , cv::gapi::ArgType::CV_STRING,  G)  \
WRAP_ARGS(cv::Point   , cv::gapi::ArgType::CV_POINT,   G)  \
WRAP_ARGS(cv::Point2f , cv::gapi::ArgType::CV_POINT2F, G)  \
WRAP_ARGS(cv::Size    , cv::gapi::ArgType::CV_SIZE,    G)  \
WRAP_ARGS(cv::Rect    , cv::gapi::ArgType::CV_RECT,    G)  \
WRAP_ARGS(cv::Scalar  , cv::gapi::ArgType::CV_SCALAR,  G)  \
WRAP_ARGS(cv::Mat     , cv::gapi::ArgType::CV_MAT,     G)  \
WRAP_ARGS(cv::GMat    , cv::gapi::ArgType::CV_GMAT,    G2)

#define GOPAQUE_TYPE_LIST_G(G, G2) \
WRAP_ARGS(bool        , cv::gapi::ArgType::CV_BOOL,    G)  \
WRAP_ARGS(int         , cv::gapi::ArgType::CV_INT,     G)  \
WRAP_ARGS(double      , cv::gapi::ArgType::CV_DOUBLE,  G)  \
WRAP_ARGS(float       , cv::gapi::ArgType::CV_FLOAT,   G)  \
WRAP_ARGS(std::string , cv::gapi::ArgType::CV_STRING,  G)  \
WRAP_ARGS(cv::Point   , cv::gapi::ArgType::CV_POINT,   G)  \
WRAP_ARGS(cv::Point2f , cv::gapi::ArgType::CV_POINT2F, G)  \
WRAP_ARGS(cv::Size    , cv::gapi::ArgType::CV_SIZE,    G)  \
WRAP_ARGS(cv::Rect    , cv::gapi::ArgType::CV_RECT,    G2) \

namespace cv {
namespace gapi {

// NB: cv.gapi.CV_BOOL in python
enum ArgType {
    CV_BOOL,
    CV_INT,
    CV_DOUBLE,
    CV_FLOAT,
    CV_STRING,
    CV_POINT,
    CV_POINT2F,
    CV_SIZE,
    CV_RECT,
    CV_SCALAR,
    CV_MAT,
    CV_GMAT,
};

GAPI_EXPORTS_W inline cv::GInferOutputs infer(const String& name, const cv::GInferInputs& inputs)
{
    return infer<Generic>(name, inputs);
}

GAPI_EXPORTS_W inline GInferOutputs infer(const std::string& name,
                                          const cv::GOpaque<cv::Rect>& roi,
                                          const GInferInputs& inputs)
{
    return infer<Generic>(name, roi, inputs);
}

GAPI_EXPORTS_W inline GInferListOutputs infer(const std::string& name,
                                              const cv::GArray<cv::Rect>& rois,
                                              const GInferInputs& inputs)
{
    return infer<Generic>(name, rois, inputs);
}

GAPI_EXPORTS_W inline GInferListOutputs infer2(const std::string& name,
                                               const cv::GMat in,
                                               const GInferListInputs& inputs)
{
    return infer2<Generic>(name, in, inputs);
}

} // namespace gapi

namespace detail {

template <template <typename> class Wrapper, typename T>
struct WrapType { using type = Wrapper<T>; };

template <template <typename> class T, typename... Types>
using MakeVariantType = cv::util::variant<typename WrapType<T, Types>::type...>;

template<typename T> struct ArgTypeTraits;

#define DEFINE_TYPE_TRAITS(T, E) \
template <> \
struct ArgTypeTraits<T> { \
    static constexpr const cv::gapi::ArgType type = E; \
}; \

GARRAY_TYPE_LIST_G(DEFINE_TYPE_TRAITS, DEFINE_TYPE_TRAITS)

} // namespace detail

class GAPI_EXPORTS_W_SIMPLE GOpaqueT
{
public:
    GOpaqueT() = default;
    using Storage = cv::detail::MakeVariantType<cv::GOpaque, GOPAQUE_TYPE_LIST_G(ID_, ID)>;

    template<typename T>
    GOpaqueT(cv::GOpaque<T> arg) : m_type(cv::detail::ArgTypeTraits<T>::type), m_arg(arg) { };

    GAPI_WRAP GOpaqueT(gapi::ArgType type) : m_type(type)
    {

#define HC(T, K) case K: \
        m_arg = cv::GOpaque<T>(); \
        break;

        SWITCH(type, GOPAQUE_TYPE_LIST_G, HC)
#undef HC
    }

    cv::detail::GOpaqueU strip() {
#define HC(T, K) case Storage:: index_of<cv::GOpaque<T>>(): \
        return cv::util::get<cv::GOpaque<T>>(m_arg).strip(); \

        SWITCH(m_arg.index(), GOPAQUE_TYPE_LIST_G, HC)
#undef HC

        GAPI_Assert(false);
    }

    GAPI_WRAP gapi::ArgType type() { return m_type; }
    const Storage& arg() const { return m_arg; }

private:
    gapi::ArgType m_type;
    Storage m_arg;
};

class GAPI_EXPORTS_W_SIMPLE GArrayT
{
public:
    GArrayT() = default;
    using Storage = cv::detail::MakeVariantType<cv::GArray, GARRAY_TYPE_LIST_G(ID_, ID)>;

    template<typename T>
    GArrayT(cv::GArray<T> arg) : m_type(cv::detail::ArgTypeTraits<T>::type), m_arg(arg) { };

    GAPI_WRAP GArrayT(gapi::ArgType type) : m_type(type)
    {

#define HC(T, K) case K: \
        m_arg = cv::GArray<T>(); \
        break;

        SWITCH(type, GARRAY_TYPE_LIST_G, HC)
#undef HC
    }

    cv::detail::GArrayU strip() {
#define HC(T, K) case Storage:: index_of<cv::GArray<T>>(): \
        return cv::util::get<cv::GArray<T>>(m_arg).strip(); \

        SWITCH(m_arg.index(), GARRAY_TYPE_LIST_G, HC)
#undef HC

        GAPI_Assert(false);
    }

    GAPI_WRAP gapi::ArgType type() { return m_type; }
    const Storage& arg() const { return m_arg; }

private:
    gapi::ArgType m_type;
    Storage m_arg;
};

namespace gapi {
namespace wip {

class GAPI_EXPORTS_W_SIMPLE GOutputs
{
public:
    GOutputs() = default;
    GOutputs(const std::string& id, cv::GKernel::M outMeta, cv::GArgs &&ins);

    GAPI_WRAP cv::GMat getGMat();
    GAPI_WRAP cv::GScalar getGScalar();
    GAPI_WRAP cv::GArrayT getGArray(cv::gapi::ArgType type);
    GAPI_WRAP cv::GOpaqueT getGOpaque(cv::gapi::ArgType type);

private:
    class Priv;
    std::shared_ptr<Priv> m_priv;
};

GOutputs op(const std::string& id, cv::GKernel::M outMeta, cv::GArgs&& args);

template <typename... T>
GOutputs op(const std::string& id, cv::GKernel::M outMeta, T&&... args)
{
    return op(id, outMeta, cv::GArgs{cv::GArg(std::forward<T>(args))... });
}

} // namespace wip
} // namespace gapi
} // namespace cv

cv::gapi::wip::GOutputs cv::gapi::wip::op(const std::string& id,
                                          cv::GKernel::M outMeta,
                                          cv::GArgs&& args)
{
    cv::gapi::wip::GOutputs outputs{id, outMeta, std::move(args)};
    return outputs;
}

class cv::gapi::wip::GOutputs::Priv
{
public:
    Priv(const std::string& id, cv::GKernel::M outMeta, cv::GArgs &&ins);

    cv::GMat getGMat();
    cv::GScalar getGScalar();
    cv::GArrayT getGArray(cv::gapi::ArgType);
    cv::GOpaqueT getGOpaque(cv::gapi::ArgType);

private:
    int output = 0;
    std::unique_ptr<cv::GCall> m_call;
};

cv::gapi::wip::GOutputs::Priv::Priv(const std::string& id, cv::GKernel::M outMeta, cv::GArgs &&args)
{
    cv::GKinds kinds;
    kinds.reserve(args.size());
    std::transform(args.begin(), args.end(), std::back_inserter(kinds),
                   [](const cv::GArg& arg) { return arg.opaque_kind; });

    m_call.reset(new cv::GCall{cv::GKernel{id, {}, outMeta, {}, std::move(kinds), {}}});
    m_call->setArgs(std::move(args));
}

cv::GMat cv::gapi::wip::GOutputs::Priv::getGMat()
{
    m_call->kernel().outShapes.push_back(cv::GShape::GMAT);
    // ...so _empty_ constructor is passed here.
    m_call->kernel().outCtors.emplace_back(cv::util::monostate{});
    return m_call->yield(output++);
}

cv::GScalar cv::gapi::wip::GOutputs::Priv::getGScalar()
{
    m_call->kernel().outShapes.push_back(cv::GShape::GSCALAR);
    // ...so _empty_ constructor is passed here.
    m_call->kernel().outCtors.emplace_back(cv::util::monostate{});
    return m_call->yieldScalar(output++);
}

cv::GArrayT cv::gapi::wip::GOutputs::Priv::getGArray(cv::gapi::ArgType type)
{
    m_call->kernel().outShapes.push_back(cv::GShape::GARRAY);
#define HC(T, K) \
case K: \
    m_call->kernel().outCtors.emplace_back(cv::detail::GObtainCtor<cv::GArray<T>>::get()); \
    return cv::GArrayT(m_call->yieldArray<T>(output++)); \

    SWITCH(type, GARRAY_TYPE_LIST_G, HC)
#undef HC
}

cv::GOpaqueT cv::gapi::wip::GOutputs::Priv::getGOpaque(cv::gapi::ArgType type)
{
    m_call->kernel().outShapes.push_back(cv::GShape::GOPAQUE);
#define HC(T, K) \
case K: \
    m_call->kernel().outCtors.emplace_back(cv::detail::GObtainCtor<cv::GOpaque<T>>::get()); \
    return cv::GOpaqueT(m_call->yieldOpaque<T>(output++)); \

    SWITCH(type, GOPAQUE_TYPE_LIST_G, HC)
#undef HC
}

cv::gapi::wip::GOutputs::GOutputs(const std::string& id,
                                  cv::GKernel::M outMeta,
                                  cv::GArgs &&ins) :
    m_priv(new cv::gapi::wip::GOutputs::Priv(id, outMeta, std::move(ins)))
{
}

cv::GMat cv::gapi::wip::GOutputs::getGMat()
{
    return m_priv->getGMat();
}

cv::GScalar cv::gapi::wip::GOutputs::getGScalar()
{
    return m_priv->getGScalar();
}

cv::GArrayT cv::gapi::wip::GOutputs::getGArray(cv::gapi::ArgType type)
{
    return m_priv->getGArray(type);
}

cv::GOpaqueT cv::gapi::wip::GOutputs::getGOpaque(cv::gapi::ArgType type)
{
    return m_priv->getGOpaque(type);
}

#endif // OPENCV_GAPI_PYTHON_BRIDGE_HPP
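The ArgType constants above surface in Python as cv.gapi.CV_* and select the element type when a GArrayT or GOpaqueT placeholder is created. A short sketch, grounded in the kmeans and infer tests added by this commit:

    import cv2 as cv

    # Typed graph placeholders are created from the cv.gapi.CV_* constants.
    data        = cv.GArrayT(cv.gapi.CV_POINT2F)   # GArray<cv::Point2f>
    best_labels = cv.GArrayT(cv.gapi.CV_INT)       # GArray<int>
    g_roi       = cv.GOpaqueT(cv.gapi.CV_RECT)     # GOpaque<cv::Rect>

    # They are wired into a computation like any other G-type:
    criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0)
    compactness, out_labels, centers = cv.gapi.kmeans(data, 5, best_labels,
                                                      criteria, 1,
                                                      cv.KMEANS_RANDOM_CENTERS)
    comp = cv.GComputation(cv.GIn(data, best_labels),
                           cv.GOut(compactness, out_labels, centers))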
67
modules/gapi/misc/python/shadow_gapi.hpp
Normal file
@@ -0,0 +1,67 @@
#error This is a shadow header file, which is not intended for processing by any compiler. \
       Only bindings parser should handle this file.

namespace cv
{
    struct GAPI_EXPORTS_W_SIMPLE GCompileArg { };

    GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg);
    GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GNetPackage pkg);
    GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage kernels, gapi::GNetPackage nets);

    // NB: This classes doesn't exist in *.so
    // HACK: Mark them as a class to force python wrapper generate code for this entities
    class GAPI_EXPORTS_W_SIMPLE GProtoArg { };
    class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { };
    class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { };
    class GAPI_EXPORTS_W_SIMPLE GRunArg { };
    class GAPI_EXPORTS_W_SIMPLE GMetaArg { GAPI_WRAP GMetaArg(); };

    using GProtoInputArgs  = GIOProtoArgs<In_Tag>;
    using GProtoOutputArgs = GIOProtoArgs<Out_Tag>;

    class GAPI_EXPORTS_W_SIMPLE GInferInputs
    {
    public:
        GAPI_WRAP GInferInputs();
        GAPI_WRAP void setInput(const std::string& name, const cv::GMat& value);
        GAPI_WRAP void setInput(const std::string& name, const cv::GFrame& value);
    };

    class GAPI_EXPORTS_W_SIMPLE GInferListInputs
    {
    public:
        GAPI_WRAP GInferListInputs();
        GAPI_WRAP void setInput(const std::string& name, const cv::GArray<cv::GMat>& value);
        GAPI_WRAP void setInput(const std::string& name, const cv::GArray<cv::Rect>& value);
    };

    class GAPI_EXPORTS_W_SIMPLE GInferOutputs
    {
    public:
        GAPI_WRAP GInferOutputs();
        GAPI_WRAP cv::GMat at(const std::string& name);
    };

    class GAPI_EXPORTS_W_SIMPLE GInferListOutputs
    {
    public:
        GAPI_WRAP GInferListOutputs();
        GAPI_WRAP cv::GArray<cv::GMat> at(const std::string& name);
    };

    namespace detail
    {
        struct GAPI_EXPORTS_W_SIMPLE ExtractArgsCallback { };
        struct GAPI_EXPORTS_W_SIMPLE ExtractMetaCallback { };
    } // namespace detail

    namespace gapi
    {
        GAPI_EXPORTS_W gapi::GNetPackage networks(const cv::gapi::ie::PyParams& params);
        namespace wip
        {
            class GAPI_EXPORTS_W IStreamSource { };
        } // namespace wip
    } // namespace gapi
} // namespace cv
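These shadow declarations are what make the generic inference API visible to the bindings parser. A hedged sketch of its Python-side use, mirroring the infer tests below (model paths, network tag, and layer names are illustrative placeholders):

    import cv2 as cv

    model_path   = 'age-gender-recognition-retail-0013.xml'  # placeholder path
    weights_path = 'age-gender-recognition-retail-0013.bin'  # placeholder path
    device_id    = 'CPU'

    g_in   = cv.GMat()
    inputs = cv.GInferInputs()
    inputs.setInput('data', g_in)

    outputs  = cv.gapi.infer("net", inputs)      # generic inference by network tag
    age_g    = outputs.at("age_conv3")
    gender_g = outputs.at("prob")

    comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
    pp   = cv.gapi.ie.params("net", model_path, weights_path, device_id)
    # Requires an Inference Engine build and the model files above:
    # age, gender = comp.apply(cv.gin(img), args=cv.compile_args(cv.gapi.networks(pp)))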
190
modules/gapi/misc/python/test/test_gapi_core.py
Normal file
@@ -0,0 +1,190 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
import os
|
||||
|
||||
from tests_common import NewOpenCVTests
|
||||
|
||||
|
||||
# Plaidml is an optional backend
|
||||
pkgs = [
|
||||
('ocl' , cv.gapi.core.ocl.kernels()),
|
||||
('cpu' , cv.gapi.core.cpu.kernels()),
|
||||
('fluid' , cv.gapi.core.fluid.kernels())
|
||||
# ('plaidml', cv.gapi.core.plaidml.kernels())
|
||||
]
|
||||
|
||||
|
||||
class gapi_core_test(NewOpenCVTests):
|
||||
|
||||
def test_add(self):
|
||||
# TODO: Extend to use any type and size here
|
||||
sz = (720, 1280)
|
||||
in1 = np.full(sz, 100)
|
||||
in2 = np.full(sz, 50)
|
||||
|
||||
# OpenCV
|
||||
expected = cv.add(in1, in2)
|
||||
|
||||
# G-API
|
||||
g_in1 = cv.GMat()
|
||||
g_in2 = cv.GMat()
|
||||
g_out = cv.gapi.add(g_in1, g_in2)
|
||||
comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
def test_add_uint8(self):
|
||||
sz = (720, 1280)
|
||||
in1 = np.full(sz, 100, dtype=np.uint8)
|
||||
in2 = np.full(sz, 50 , dtype=np.uint8)
|
||||
|
||||
# OpenCV
|
||||
expected = cv.add(in1, in2)
|
||||
|
||||
# G-API
|
||||
g_in1 = cv.GMat()
|
||||
g_in2 = cv.GMat()
|
||||
g_out = cv.gapi.add(g_in1, g_in2)
|
||||
comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
def test_mean(self):
|
||||
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
|
||||
in_mat = cv.imread(img_path)
|
||||
|
||||
# OpenCV
|
||||
expected = cv.mean(in_mat)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
g_out = cv.gapi.mean(g_in)
|
||||
comp = cv.GComputation(g_in, g_out)
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
def test_split3(self):
|
||||
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
|
||||
in_mat = cv.imread(img_path)
|
||||
|
||||
# OpenCV
|
||||
expected = cv.split(in_mat)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
b, g, r = cv.gapi.split3(g_in)
|
||||
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
for e, a in zip(expected, actual):
|
||||
self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
self.assertEqual(e.dtype, a.dtype, 'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
def test_threshold(self):
|
||||
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
|
||||
in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
|
||||
maxv = (30, 30)
|
||||
|
||||
# OpenCV
|
||||
expected_thresh, expected_mat = cv.threshold(in_mat, maxv[0], maxv[0], cv.THRESH_TRIANGLE)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
g_sc = cv.GScalar()
|
||||
mat, threshold = cv.gapi.threshold(g_in, g_sc, cv.THRESH_TRIANGLE)
|
||||
comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(mat, threshold))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual_mat, actual_thresh = comp.apply(cv.gin(in_mat, maxv), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected_mat, actual_mat, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
self.assertEqual(expected_mat.dtype, actual_mat.dtype,
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
self.assertEqual(expected_thresh, actual_thresh[0],
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
def test_kmeans(self):
|
||||
# K-means params
|
||||
count = 100
|
||||
sz = (count, 2)
|
||||
in_mat = np.random.random(sz).astype(np.float32)
|
||||
K = 5
|
||||
flags = cv.KMEANS_RANDOM_CENTERS
|
||||
attempts = 1;
|
||||
criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
compactness, out_labels, centers = cv.gapi.kmeans(g_in, K, criteria, attempts, flags)
|
||||
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(compactness, out_labels, centers))
|
||||
|
||||
compact, labels, centers = comp.apply(cv.gin(in_mat))
|
||||
|
||||
# Assert
|
||||
self.assertTrue(compact >= 0)
|
||||
self.assertEqual(sz[0], labels.shape[0])
|
||||
self.assertEqual(1, labels.shape[1])
|
||||
self.assertTrue(labels.size != 0)
|
||||
self.assertEqual(centers.shape[1], sz[1]);
|
||||
self.assertEqual(centers.shape[0], K);
|
||||
self.assertTrue(centers.size != 0);
|
||||
|
||||
|
||||
def generate_random_points(self, sz):
|
||||
arr = np.random.random(sz).astype(np.float32).T
|
||||
return list(zip(arr[0], arr[1]))
|
||||
|
||||
|
||||
def test_kmeans_2d(self):
|
||||
# K-means 2D params
|
||||
count = 100
|
||||
sz = (count, 2)
|
||||
amount = sz[0]
|
||||
K = 5
|
||||
flags = cv.KMEANS_RANDOM_CENTERS
|
||||
attempts = 1;
|
||||
criteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 30, 0);
|
||||
in_vector = self.generate_random_points(sz)
|
||||
in_labels = []
|
||||
|
||||
# G-API
|
||||
data = cv.GArrayT(cv.gapi.CV_POINT2F)
|
||||
best_labels = cv.GArrayT(cv.gapi.CV_INT)
|
||||
|
||||
compactness, out_labels, centers = cv.gapi.kmeans(data, K, best_labels, criteria, attempts, flags);
|
||||
comp = cv.GComputation(cv.GIn(data, best_labels), cv.GOut(compactness, out_labels, centers));
|
||||
|
||||
compact, labels, centers = comp.apply(cv.gin(in_vector, in_labels));
|
||||
|
||||
# Assert
|
||||
self.assertTrue(compact >= 0)
|
||||
self.assertEqual(amount, len(labels))
|
||||
self.assertEqual(K, len(centers))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
NewOpenCVTests.bootstrap()
|
||||
106
modules/gapi/misc/python/test/test_gapi_imgproc.py
Normal file
@@ -0,0 +1,106 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
import os
|
||||
|
||||
from tests_common import NewOpenCVTests
|
||||
|
||||
|
||||
# Plaidml is an optional backend
|
||||
pkgs = [
|
||||
('ocl' , cv.gapi.core.ocl.kernels()),
|
||||
('cpu' , cv.gapi.core.cpu.kernels()),
|
||||
('fluid' , cv.gapi.core.fluid.kernels())
|
||||
# ('plaidml', cv.gapi.core.plaidml.kernels())
|
||||
]
|
||||
|
||||
|
||||
class gapi_imgproc_test(NewOpenCVTests):
|
||||
|
||||
def test_good_features_to_track(self):
|
||||
# TODO: Extend to use any type and size here
|
||||
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
|
||||
in1 = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
|
||||
|
||||
# NB: goodFeaturesToTrack configuration
|
||||
max_corners = 50
|
||||
quality_lvl = 0.01
|
||||
min_distance = 10
|
||||
block_sz = 3
|
||||
use_harris_detector = True
|
||||
k = 0.04
|
||||
mask = None
|
||||
|
||||
# OpenCV
|
||||
expected = cv.goodFeaturesToTrack(in1, max_corners, quality_lvl,
|
||||
min_distance, mask=mask,
|
||||
blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
g_out = cv.gapi.goodFeaturesToTrack(g_in, max_corners, quality_lvl,
|
||||
min_distance, mask, block_sz, use_harris_detector, k)
|
||||
|
||||
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(in1), args=cv.compile_args(pkg))
|
||||
# NB: OpenCV & G-API have different output shapes:
|
||||
# OpenCV - (num_points, 1, 2)
|
||||
# G-API - (num_points, 2)
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected.flatten(),
|
||||
np.array(actual, dtype=np.float32).flatten(),
|
||||
cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
def test_rgb2gray(self):
|
||||
# TODO: Extend to use any type and size here
|
||||
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
|
||||
in1 = cv.imread(img_path)
|
||||
|
||||
# OpenCV
|
||||
expected = cv.cvtColor(in1, cv.COLOR_RGB2GRAY)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
g_out = cv.gapi.RGB2Gray(g_in)
|
||||
|
||||
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(in1), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
def test_bounding_rect(self):
|
||||
sz = 1280
|
||||
fscale = 256
|
||||
|
||||
def sample_value(fscale):
|
||||
return np.random.uniform(0, 255 * fscale) / fscale
|
||||
|
||||
points = np.array([(sample_value(fscale), sample_value(fscale)) for _ in range(1280)], np.float32)
|
||||
|
||||
# OpenCV
|
||||
expected = cv.boundingRect(points)
|
||||
|
||||
# G-API
|
||||
g_in = cv.GMat()
|
||||
g_out = cv.gapi.boundingRect(g_in)
|
||||
|
||||
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
|
||||
|
||||
for pkg_name, pkg in pkgs:
|
||||
actual = comp.apply(cv.gin(points), args=cv.compile_args(pkg))
|
||||
# Comparison
|
||||
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
|
||||
'Failed on ' + pkg_name + ' backend')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
NewOpenCVTests.bootstrap()
|
||||
321
modules/gapi/misc/python/test/test_gapi_infer.py
Normal file
@@ -0,0 +1,321 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
import os
|
||||
|
||||
from tests_common import NewOpenCVTests
|
||||
|
||||
|
||||
class test_gapi_infer(NewOpenCVTests):
|
||||
|
||||
def infer_reference_network(self, model_path, weights_path, img):
|
||||
net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
|
||||
net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
|
||||
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
|
||||
|
||||
blob = cv.dnn.blobFromImage(img)
|
||||
|
||||
net.setInput(blob)
|
||||
return net.forward(net.getUnconnectedOutLayersNames())
|
||||
|
||||
|
||||
def make_roi(self, img, roi):
|
||||
return img[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2], ...]
|
||||
|
||||
|
||||
def test_age_gender_infer(self):
|
||||
# NB: Check IE
|
||||
if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
|
||||
return
|
||||
|
||||
root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
|
||||
model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
|
||||
weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
|
||||
device_id = 'CPU'
|
||||
|
||||
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
|
||||
img = cv.resize(cv.imread(img_path), (62,62))
|
||||
|
||||
# OpenCV DNN
|
||||
dnn_age, dnn_gender = self.infer_reference_network(model_path, weights_path, img)
|
||||
|
||||
# OpenCV G-API
|
||||
g_in = cv.GMat()
|
||||
inputs = cv.GInferInputs()
|
||||
inputs.setInput('data', g_in)
|
||||
|
||||
outputs = cv.gapi.infer("net", inputs)
|
||||
age_g = outputs.at("age_conv3")
|
||||
gender_g = outputs.at("prob")
|
||||
|
||||
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
|
||||
pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
|
||||
|
||||
gapi_age, gapi_gender = comp.apply(cv.gin(img), args=cv.compile_args(cv.gapi.networks(pp)))
|
||||
|
||||
# Check
|
||||
self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
|
||||
        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))


    def test_age_gender_infer_roi(self):
        # NB: Check IE
        if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        device_id = 'CPU'

        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        img = cv.imread(img_path)
        roi = (10, 10, 62, 62)

        # OpenCV DNN
        dnn_age, dnn_gender = self.infer_reference_network(model_path,
                                                           weights_path,
                                                           self.make_roi(img, roi))

        # OpenCV G-API
        g_in = cv.GMat()
        g_roi = cv.GOpaqueT(cv.gapi.CV_RECT)
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)

        outputs = cv.gapi.infer("net", g_roi, inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")

        comp = cv.GComputation(cv.GIn(g_in, g_roi), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

        gapi_age, gapi_gender = comp.apply(cv.gin(img, roi), args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))


    def test_age_gender_infer_roi_list(self):
        # NB: Check IE
        if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        device_id = 'CPU'

        rois = [(10, 15, 62, 62), (23, 50, 62, 62), (14, 100, 62, 62), (80, 50, 62, 62)]
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        img = cv.imread(img_path)

        # OpenCV DNN
        dnn_age_list = []
        dnn_gender_list = []
        for roi in rois:
            age, gender = self.infer_reference_network(model_path,
                                                       weights_path,
                                                       self.make_roi(img, roi))
            dnn_age_list.append(age)
            dnn_gender_list.append(gender)

        # OpenCV G-API
        g_in = cv.GMat()
        g_rois = cv.GArrayT(cv.gapi.CV_RECT)
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)

        outputs = cv.gapi.infer("net", g_rois, inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")

        comp = cv.GComputation(cv.GIn(g_in, g_rois), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

        gapi_age_list, gapi_gender_list = comp.apply(cv.gin(img, rois),
                                                     args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        for gapi_age, gapi_gender, dnn_age, dnn_gender in zip(gapi_age_list,
                                                              gapi_gender_list,
                                                              dnn_age_list,
                                                              dnn_gender_list):
            self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
            self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))


    def test_age_gender_infer2_roi(self):
        # NB: Check IE
        if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        device_id = 'CPU'

        rois = [(10, 15, 62, 62), (23, 50, 62, 62), (14, 100, 62, 62), (80, 50, 62, 62)]
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        img = cv.imread(img_path)

        # OpenCV DNN
        dnn_age_list = []
        dnn_gender_list = []
        for roi in rois:
            age, gender = self.infer_reference_network(model_path,
                                                       weights_path,
                                                       self.make_roi(img, roi))
            dnn_age_list.append(age)
            dnn_gender_list.append(gender)

        # OpenCV G-API
        g_in = cv.GMat()
        g_rois = cv.GArrayT(cv.gapi.CV_RECT)
        inputs = cv.GInferListInputs()
        inputs.setInput('data', g_rois)

        outputs = cv.gapi.infer2("net", g_in, inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")

        comp = cv.GComputation(cv.GIn(g_in, g_rois), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

        gapi_age_list, gapi_gender_list = comp.apply(cv.gin(img, rois),
                                                     args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        for gapi_age, gapi_gender, dnn_age, dnn_gender in zip(gapi_age_list,
                                                              gapi_gender_list,
                                                              dnn_age_list,
                                                              dnn_gender_list):
            self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
            self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))
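
    # NB: taken together, the three tests above cover the ROI flavors of inference,
    # roughly:
    #   cv.gapi.infer("net", g_roi, inputs)    - run the net once on a GOpaque<Rect>
    #                                            crop of the frame;
    #   cv.gapi.infer("net", g_rois, inputs)   - same for a GArray<Rect>, so every
    #                                            output becomes a GArray as well;
    #   cv.gapi.infer2("net", g_in, list_ins)  - list inference, where the per-ROI
    #                                            inputs come through cv.GInferListInputs().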

    def test_person_detection_retail_0013(self):
        # NB: Check IE
        if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE):
            return

        root_path = '/omz_intel_models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013'
        model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        img_path = self.find_file('gpu/lbpcascade/er.png', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        device_id = 'CPU'
        img = cv.resize(cv.imread(img_path), (544, 320))

        # OpenCV DNN
        net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
        net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
        net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

        blob = cv.dnn.blobFromImage(img)

        def parseSSD(detections, size):
            h, w = size
            bboxes = []
            detections = detections.reshape(-1, 7)
            for sample_id, class_id, confidence, xmin, ymin, xmax, ymax in detections:
                if confidence >= 0.5:
                    x = int(xmin * w)
                    y = int(ymin * h)
                    width = int(xmax * w - x)
                    height = int(ymax * h - y)
                    bboxes.append((x, y, width, height))

            return bboxes

        net.setInput(blob)
        dnn_detections = net.forward()
        dnn_boxes = parseSSD(np.array(dnn_detections), img.shape[:2])

        # OpenCV G-API
        g_in = cv.GMat()
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)

        g_sz = cv.gapi.streaming.size(g_in)
        outputs = cv.gapi.infer("net", inputs)
        detections = outputs.at("detection_out")
        bboxes = cv.gapi.parseSSD(detections, g_sz, 0.5, False, False)

        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(bboxes))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)

        gapi_boxes = comp.apply(cv.gin(img.astype(np.float32)),
                                args=cv.compile_args(cv.gapi.networks(pp)))

        # Comparison
        self.assertEqual(0.0, cv.norm(np.array(dnn_boxes).flatten(),
                                      np.array(gapi_boxes).flatten(),
                                      cv.NORM_INF))
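
    # NB: a rough reading of the G-API branch above: cv.gapi.parseSSD() consumes the
    # raw Nx7 SSD detection blob together with the frame size delivered by
    # cv.gapi.streaming.size(g_in), keeps detections above the 0.5 confidence
    # threshold and returns them as pixel rects; the remaining flags are left
    # disabled here, which matches what the local parseSSD() reference does.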


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
526
modules/gapi/misc/python/test/test_gapi_sample_pipelines.py
Normal file
@@ -0,0 +1,526 @@
#!/usr/bin/env python

import numpy as np
import cv2 as cv
import os

from tests_common import NewOpenCVTests


# Plaidml is an optional backend
pkgs = [
    ('ocl'   , cv.gapi.core.ocl.kernels()),
    ('cpu'   , cv.gapi.core.cpu.kernels()),
    ('fluid' , cv.gapi.core.fluid.kernels())
    # ('plaidml', cv.gapi.core.plaidml.kernels())
]
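
# NB: each entry above pairs a backend name with its kernel package; a package is
# what gets handed to GComputation.apply() via cv.compile_args(), e.g. (roughly):
#   comp.apply(cv.gin(in_mat), args=cv.compile_args(cv.gapi.core.cpu.kernels()))
# which is exactly how test_mean_over_r() below iterates over the backends.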


# Test output GMat.
def custom_add(img1, img2, dtype):
    return cv.add(img1, img2)


# Test output GScalar.
def custom_mean(img):
    return cv.mean(img)


# Test output tuple of GMat's.
def custom_split3(img):
    # NB: cv.split returns a list, but G-API requires a tuple in the multiple-output case.
    return tuple(cv.split(img))


# Test output GOpaque.
def custom_size(img):
    # NB: Take only H, W, because the operation should return cv::Size which is 2D.
    return img.shape[:2]


# Test output GArray.
def custom_goodFeaturesToTrack(img, max_corners, quality_lvl,
                               min_distance, mask, block_sz,
                               use_harris_detector, k):
    features = cv.goodFeaturesToTrack(img, max_corners, quality_lvl,
                                      min_distance, mask=mask,
                                      blockSize=block_sz,
                                      useHarrisDetector=use_harris_detector, k=k)
    # NB: The operation output is cv::GArray<cv::Point2f>, so it should be mapped to
    # Python as a list of tuples, e.g. [(1.2, 3.4), (5.2, 3.2)]: by OpenCV's binding
    # rules cv::Point2f maps to a tuple and cv::GArray<> maps to a list.
    # OpenCV returns an np.array with shape (n_features, 1, 2), so convert it to a
    # list of n_features tuples.
    features = list(map(tuple, features.reshape(features.shape[0], -1)))
    return features


# Test input scalar.
def custom_addC(img, sc, dtype):
    # NB: dtype is simply ignored in this implementation.
    # Moreover, the G-API kernel receives the scalar as a 4-element tuple whose last
    # element is zero, so drop it to make broadcasting work.
    return img + np.array(sc, dtype=np.uint8)[:-1]


# Test input opaque.
def custom_sizeR(rect):
    # NB: rect is a tuple (x, y, w, h)
    return (rect[2], rect[3])


# Test input array.
def custom_boundingRect(array):
    # NB: OpenCV - numpy array (n_points x 2).
    #     G-API  - array of tuples (n_points).
    return cv.boundingRect(np.array(array))


# Test input mat
def add(g_in1, g_in2, dtype):
    def custom_add_meta(img_desc1, img_desc2, dtype):
        return img_desc1

    return cv.gapi.wip.op('custom.add', custom_add_meta, g_in1, g_in2, dtype).getGMat()


# Test multiple output mat
def split3(g_in):
    def custom_split3_meta(img_desc):
        out_desc = img_desc.withType(img_desc.depth, 1)
        return out_desc, out_desc, out_desc

    op = cv.gapi.wip.op('custom.split3', custom_split3_meta, g_in)

    ch1 = op.getGMat()
    ch2 = op.getGMat()
    ch3 = op.getGMat()

    return ch1, ch2, ch3


# Test output scalar
def mean(g_in):
    def custom_mean_meta(img_desc):
        return cv.empty_scalar_desc()

    op = cv.gapi.wip.op('custom.mean', custom_mean_meta, g_in)
    return op.getGScalar()


# Test input scalar
def addC(g_in, g_sc, dtype):
    def custom_addC_meta(img_desc, sc_desc, dtype):
        return img_desc

    op = cv.gapi.wip.op('custom.addC', custom_addC_meta, g_in, g_sc, dtype)
    return op.getGMat()


# Test output opaque.
def size(g_in):
    def custom_size_meta(img_desc):
        return cv.empty_gopaque_desc()

    op = cv.gapi.wip.op('custom.size', custom_size_meta, g_in)
    return op.getGOpaque(cv.gapi.CV_SIZE)


# Test input opaque.
def sizeR(g_rect):
    def custom_sizeR_meta(opaque_desc):
        return cv.empty_gopaque_desc()

    op = cv.gapi.wip.op('custom.sizeR', custom_sizeR_meta, g_rect)
    return op.getGOpaque(cv.gapi.CV_SIZE)


# Test input array.
def boundingRect(g_array):
    def custom_boundingRect_meta(array_desc):
        return cv.empty_gopaque_desc()

    op = cv.gapi.wip.op('custom.boundingRect', custom_boundingRect_meta, g_array)
    return op.getGOpaque(cv.gapi.CV_RECT)


# Test output GArray.
def goodFeaturesToTrack(g_in, max_corners, quality_lvl,
                        min_distance, mask, block_sz,
                        use_harris_detector, k):
    def custom_goodFeaturesToTrack_meta(img_desc, max_corners, quality_lvl,
                                        min_distance, mask, block_sz, use_harris_detector, k):
        return cv.empty_array_desc()

    op = cv.gapi.wip.op('custom.goodFeaturesToTrack', custom_goodFeaturesToTrack_meta, g_in,
                        max_corners, quality_lvl, min_distance, mask, block_sz, use_harris_detector, k)
    return op.getGArray(cv.gapi.CV_POINT2F)
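
# NB: a rough sketch of the pattern the wrappers above follow (the op ids such as
# 'custom.add' are only the names used in this file, not standard G-API kernels):
#
#   op  = cv.gapi.wip.op(<op id>, <out-meta callback>, <graph inputs...>)
#   out = op.getGMat() / op.getGScalar() / op.getGOpaque(T) / op.getGArray(T)
#
# The out-meta callback receives the input descriptions (GMatDesc & friends) and
# must return one description per output. At run time each op id has to be bound to
# a Python implementation via cv.gapi.wip.kernels((impl_fn, <op id>), ...) and passed
# to GComputation.apply() through cv.compile_args(), as the tests below do.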


class gapi_sample_pipelines(NewOpenCVTests):

    # NB: This test checks multiple outputs of an operation.
    def test_mean_over_r(self):
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.imread(img_path)

        # OpenCV
        _, _, r_ch = cv.split(in_mat)
        expected = cv.mean(r_ch)

        # G-API
        g_in = cv.GMat()
        b, g, r = cv.gapi.split3(g_in)
        g_out = cv.gapi.mean(r)
        comp = cv.GComputation(g_in, g_out)

        for pkg_name, pkg in pkgs:
            actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
            # Comparison
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
                             'Failed on ' + pkg_name + ' backend')


    def test_custom_mean(self):
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.imread(img_path)

        # OpenCV
        expected = cv.mean(in_mat)

        # G-API
        g_in = cv.GMat()
        g_out = cv.gapi.mean(g_in)

        comp = cv.GComputation(g_in, g_out)

        pkg = cv.gapi.wip.kernels((custom_mean, 'org.opencv.core.math.mean'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        # Comparison
        self.assertEqual(expected, actual)


    def test_custom_add(self):
        sz = (3, 3)
        in_mat1 = np.full(sz, 45, dtype=np.uint8)
        in_mat2 = np.full(sz, 50, dtype=np.uint8)

        # OpenCV
        expected = cv.add(in_mat1, in_mat2)

        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        g_out = cv.gapi.add(g_in1, g_in2)
        comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))

        pkg = cv.gapi.wip.kernels((custom_add, 'org.opencv.core.math.add'))
        actual = comp.apply(cv.gin(in_mat1, in_mat2), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_size(self):
        sz = (100, 150, 3)
        in_mat = np.full(sz, 45, dtype=np.uint8)

        # OpenCV
        expected = (100, 150)

        # G-API
        g_in = cv.GMat()
        g_sz = cv.gapi.streaming.size(g_in)
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_sz))

        pkg = cv.gapi.wip.kernels((custom_size, 'org.opencv.streaming.size'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_goodFeaturesToTrack(self):
        # G-API
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)

        # NB: goodFeaturesToTrack configuration
        max_corners = 50
        quality_lvl = 0.01
        min_distance = 10
        block_sz = 3
        use_harris_detector = True
        k = 0.04
        mask = None

        # OpenCV
        expected = cv.goodFeaturesToTrack(in_mat, max_corners, quality_lvl,
                                          min_distance, mask=mask,
                                          blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)

        # G-API
        g_in = cv.GMat()
        g_out = cv.gapi.goodFeaturesToTrack(g_in, max_corners, quality_lvl,
                                            min_distance, mask, block_sz, use_harris_detector, k)

        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
        pkg = cv.gapi.wip.kernels((custom_goodFeaturesToTrack, 'org.opencv.imgproc.feature.goodFeaturesToTrack'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        # NB: OpenCV & G-API have different output types:
        #     OpenCV - numpy array with shape (num_points, 1, 2)
        #     G-API  - list of num_points tuples
        # Comparison
        self.assertEqual(0.0, cv.norm(expected.flatten(),
                                      np.array(actual, dtype=np.float32).flatten(), cv.NORM_INF))


    def test_custom_addC(self):
        sz = (3, 3, 3)
        in_mat = np.full(sz, 45, dtype=np.uint8)
        sc = (50, 10, 20)

        # Numpy reference, make an array from sc to keep the uint8 dtype.
        expected = in_mat + np.array(sc, dtype=np.uint8)

        # G-API
        g_in = cv.GMat()
        g_sc = cv.GScalar()
        g_out = cv.gapi.addC(g_in, g_sc)
        comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(g_out))

        pkg = cv.gapi.wip.kernels((custom_addC, 'org.opencv.core.math.addC'))
        actual = comp.apply(cv.gin(in_mat, sc), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_sizeR(self):
        # x, y, w, h
        roi = (10, 15, 100, 150)

        expected = (100, 150)

        # G-API
        g_r = cv.GOpaqueT(cv.gapi.CV_RECT)
        g_sz = cv.gapi.streaming.size(g_r)
        comp = cv.GComputation(cv.GIn(g_r), cv.GOut(g_sz))

        pkg = cv.gapi.wip.kernels((custom_sizeR, 'org.opencv.streaming.sizeR'))
        actual = comp.apply(cv.gin(roi), args=cv.compile_args(pkg))

        # NB: cv.norm is applied to the tuples directly here.
        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_boundingRect(self):
        points = [(0, 0), (0, 1), (1, 0), (1, 1)]

        # OpenCV
        expected = cv.boundingRect(np.array(points))

        # G-API
        g_pts = cv.GArrayT(cv.gapi.CV_POINT)
        g_br = cv.gapi.boundingRect(g_pts)
        comp = cv.GComputation(cv.GIn(g_pts), cv.GOut(g_br))

        pkg = cv.gapi.wip.kernels((custom_boundingRect, 'org.opencv.imgproc.shape.boundingRectVector32S'))
        actual = comp.apply(cv.gin(points), args=cv.compile_args(pkg))

        # NB: cv.norm is applied to the tuples directly here.
        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))

    def test_multiple_custom_kernels(self):
        sz = (3, 3, 3)
        in_mat1 = np.full(sz, 45, dtype=np.uint8)
        in_mat2 = np.full(sz, 50, dtype=np.uint8)

        # OpenCV
        expected = cv.mean(cv.split(cv.add(in_mat1, in_mat2))[1])

        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        g_sum = cv.gapi.add(g_in1, g_in2)
        g_b, g_g, g_r = cv.gapi.split3(g_sum)
        g_mean = cv.gapi.mean(g_g)

        comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_mean))

        pkg = cv.gapi.wip.kernels((custom_add   , 'org.opencv.core.math.add'),
                                  (custom_mean  , 'org.opencv.core.math.mean'),
                                  (custom_split3, 'org.opencv.core.transform.split3'))

        actual = comp.apply(cv.gin(in_mat1, in_mat2), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_op_add(self):
        sz = (3, 3)
        in_mat1 = np.full(sz, 45, dtype=np.uint8)
        in_mat2 = np.full(sz, 50, dtype=np.uint8)

        # OpenCV
        expected = cv.add(in_mat1, in_mat2)

        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        g_out = add(g_in1, g_in2, cv.CV_8UC1)

        comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))

        pkg = cv.gapi.wip.kernels((custom_add, 'custom.add'))
        actual = comp.apply(cv.gin(in_mat1, in_mat2), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_op_split3(self):
        sz = (4, 4)
        in_ch1 = np.full(sz, 1, dtype=np.uint8)
        in_ch2 = np.full(sz, 2, dtype=np.uint8)
        in_ch3 = np.full(sz, 3, dtype=np.uint8)
        # H x W x C
        in_mat = np.stack((in_ch1, in_ch2, in_ch3), axis=2)

        # G-API
        g_in = cv.GMat()
        g_ch1, g_ch2, g_ch3 = split3(g_in)

        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_ch1, g_ch2, g_ch3))

        pkg = cv.gapi.wip.kernels((custom_split3, 'custom.split3'))
        ch1, ch2, ch3 = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(in_ch1, ch1, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(in_ch2, ch2, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(in_ch3, ch3, cv.NORM_INF))


    def test_custom_op_mean(self):
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.imread(img_path)

        # OpenCV
        expected = cv.mean(in_mat)

        # G-API
        g_in = cv.GMat()
        g_out = mean(g_in)

        comp = cv.GComputation(g_in, g_out)

        pkg = cv.gapi.wip.kernels((custom_mean, 'custom.mean'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        # Comparison
        self.assertEqual(expected, actual)


    def test_custom_op_addC(self):
        sz = (3, 3, 3)
        in_mat = np.full(sz, 45, dtype=np.uint8)
        sc = (50, 10, 20)

        # Numpy reference, make an array from sc to keep the uint8 dtype.
        expected = in_mat + np.array(sc, dtype=np.uint8)

        # G-API
        g_in = cv.GMat()
        g_sc = cv.GScalar()
        g_out = addC(g_in, g_sc, cv.CV_8UC1)
        comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(g_out))

        pkg = cv.gapi.wip.kernels((custom_addC, 'custom.addC'))
        actual = comp.apply(cv.gin(in_mat, sc), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_op_size(self):
        sz = (100, 150, 3)
        in_mat = np.full(sz, 45, dtype=np.uint8)

        # OpenCV
        expected = (100, 150)

        # G-API
        g_in = cv.GMat()
        g_sz = size(g_in)
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_sz))

        pkg = cv.gapi.wip.kernels((custom_size, 'custom.size'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_op_sizeR(self):
        # x, y, w, h
        roi = (10, 15, 100, 150)

        expected = (100, 150)

        # G-API
        g_r = cv.GOpaqueT(cv.gapi.CV_RECT)
        g_sz = sizeR(g_r)
        comp = cv.GComputation(cv.GIn(g_r), cv.GOut(g_sz))

        pkg = cv.gapi.wip.kernels((custom_sizeR, 'custom.sizeR'))
        actual = comp.apply(cv.gin(roi), args=cv.compile_args(pkg))

        # NB: cv.norm is applied to the tuples directly here.
        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_op_boundingRect(self):
        points = [(0, 0), (0, 1), (1, 0), (1, 1)]

        # OpenCV
        expected = cv.boundingRect(np.array(points))

        # G-API
        g_pts = cv.GArrayT(cv.gapi.CV_POINT)
        g_br = boundingRect(g_pts)
        comp = cv.GComputation(cv.GIn(g_pts), cv.GOut(g_br))

        pkg = cv.gapi.wip.kernels((custom_boundingRect, 'custom.boundingRect'))
        actual = comp.apply(cv.gin(points), args=cv.compile_args(pkg))

        # NB: cv.norm is applied to the tuples directly here.
        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))


    def test_custom_op_goodFeaturesToTrack(self):
        # G-API
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)

        # NB: goodFeaturesToTrack configuration
        max_corners = 50
        quality_lvl = 0.01
        min_distance = 10
        block_sz = 3
        use_harris_detector = True
        k = 0.04
        mask = None

        # OpenCV
        expected = cv.goodFeaturesToTrack(in_mat, max_corners, quality_lvl,
                                          min_distance, mask=mask,
                                          blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)

        # G-API
        g_in = cv.GMat()
        g_out = goodFeaturesToTrack(g_in, max_corners, quality_lvl,
                                    min_distance, mask, block_sz, use_harris_detector, k)

        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
        pkg = cv.gapi.wip.kernels((custom_goodFeaturesToTrack, 'custom.goodFeaturesToTrack'))
        actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))

        # NB: OpenCV & G-API have different output types:
        #     OpenCV - numpy array with shape (num_points, 1, 2)
        #     G-API  - list of num_points tuples
        # Comparison
        self.assertEqual(0.0, cv.norm(expected.flatten(),
                                      np.array(actual, dtype=np.float32).flatten(), cv.NORM_INF))


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
203
modules/gapi/misc/python/test/test_gapi_streaming.py
Normal file
@@ -0,0 +1,203 @@
#!/usr/bin/env python

import numpy as np
import cv2 as cv
import os

from tests_common import NewOpenCVTests


class test_gapi_streaming(NewOpenCVTests):

    def test_image_input(self):
        sz = (1280, 720)
        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)

        # OpenCV
        expected = cv.medianBlur(in_mat, 3)

        # G-API
        g_in = cv.GMat()
        g_out = cv.gapi.medianBlur(g_in, 3)
        c = cv.GComputation(g_in, g_out)
        ccomp = c.compileStreaming(cv.descr_of(in_mat))
        ccomp.setSource(cv.gin(in_mat))
        ccomp.start()

        _, actual = ccomp.pull()

        # Assert
        self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
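
    # NB: all the video tests below follow the same streaming pattern, roughly:
    #   ccomp = c.compileStreaming()                         # compile in streaming mode
    #   ccomp.setSource(cv.gapi.wip.make_capture_src(path))  # or cv.gin(...) to mix sources
    #   ccomp.start()
    #   ok, outs = ccomp.pull()                              # pull until ok is False
    # and compare at most max_num_frames frames against the plain OpenCV reference.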

    def test_video_input(self):
        ksize = 3
        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in = cv.GMat()
        g_out = cv.gapi.medianBlur(g_in, ksize)
        c = cv.GComputation(g_in, g_out)

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(source)
        ccomp.start()

        # Assert
        max_num_frames = 10
        proc_num_frames = 0
        while cap.isOpened():
            has_expected, expected = cap.read()
            has_actual, actual = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            self.assertEqual(0.0, cv.norm(cv.medianBlur(expected, ksize), actual, cv.NORM_INF))

            proc_num_frames += 1
            if proc_num_frames == max_num_frames:
                break


    def test_video_split3(self):
        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in = cv.GMat()
        b, g, r = cv.gapi.split3(g_in)
        c = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(source)
        ccomp.start()

        # Assert
        max_num_frames = 10
        proc_num_frames = 0
        while cap.isOpened():
            has_expected, frame = cap.read()
            has_actual, actual = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            expected = cv.split(frame)
            for e, a in zip(expected, actual):
                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF))

            proc_num_frames += 1
            if proc_num_frames == max_num_frames:
                break


    def test_video_add(self):
        sz = (576, 768, 3)
        in_mat = np.random.randint(0, 100, sz).astype(np.uint8)

        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        out = cv.gapi.add(g_in1, g_in2)
        c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(out))

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(cv.gin(source, in_mat))
        ccomp.start()

        # Assert
        max_num_frames = 10
        proc_num_frames = 0
        while cap.isOpened():
            has_expected, frame = cap.read()
            has_actual, actual = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            expected = cv.add(frame, in_mat)
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))

            proc_num_frames += 1
            if proc_num_frames == max_num_frames:
                break


    def test_video_good_features_to_track(self):
        path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']])

        # NB: goodFeaturesToTrack configuration
        max_corners = 50
        quality_lvl = 0.01
        min_distance = 10
        block_sz = 3
        use_harris_detector = True
        k = 0.04
        mask = None

        # OpenCV
        cap = cv.VideoCapture(path)

        # G-API
        g_in = cv.GMat()
        g_gray = cv.gapi.RGB2Gray(g_in)
        g_out = cv.gapi.goodFeaturesToTrack(g_gray, max_corners, quality_lvl,
                                            min_distance, mask, block_sz, use_harris_detector, k)

        c = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

        ccomp = c.compileStreaming()
        source = cv.gapi.wip.make_capture_src(path)
        ccomp.setSource(source)
        ccomp.start()

        # Assert
        max_num_frames = 10
        proc_num_frames = 0
        while cap.isOpened():
            has_expected, frame = cap.read()
            has_actual, actual = ccomp.pull()

            self.assertEqual(has_expected, has_actual)

            if not has_actual:
                break

            # OpenCV
            frame = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
            expected = cv.goodFeaturesToTrack(frame, max_corners, quality_lvl,
                                              min_distance, mask=mask,
                                              blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)
            for e, a in zip(expected, actual):
                # NB: OpenCV & G-API have different output shapes:
                #     OpenCV - (num_points, 1, 2)
                #     G-API  - (num_points, 2)
                self.assertEqual(0.0, cv.norm(e.flatten(),
                                              np.array(a, np.float32).flatten(),
                                              cv.NORM_INF))

            proc_num_frames += 1
            if proc_num_frames == max_num_frames:
                break


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
32
modules/gapi/misc/python/test/test_gapi_types.py
Normal file
@@ -0,0 +1,32 @@
#!/usr/bin/env python

import numpy as np
import cv2 as cv
import os

from tests_common import NewOpenCVTests


class gapi_types_test(NewOpenCVTests):

    def test_garray_type(self):
        types = [cv.gapi.CV_BOOL  , cv.gapi.CV_INT   , cv.gapi.CV_DOUBLE , cv.gapi.CV_FLOAT,
                 cv.gapi.CV_STRING, cv.gapi.CV_POINT , cv.gapi.CV_POINT2F, cv.gapi.CV_SIZE ,
                 cv.gapi.CV_RECT  , cv.gapi.CV_SCALAR, cv.gapi.CV_MAT    , cv.gapi.CV_GMAT]

        for t in types:
            g_array = cv.GArrayT(t)
            self.assertEqual(t, g_array.type())


    def test_gopaque_type(self):
        types = [cv.gapi.CV_BOOL  , cv.gapi.CV_INT   , cv.gapi.CV_DOUBLE , cv.gapi.CV_FLOAT,
                 cv.gapi.CV_STRING, cv.gapi.CV_POINT , cv.gapi.CV_POINT2F, cv.gapi.CV_SIZE ,
                 cv.gapi.CV_RECT]

        for t in types:
            g_opaque = cv.GOpaqueT(t)
            self.assertEqual(t, g_opaque.type())


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()