init - initialize project

Lee Nony
2022-05-06 01:58:53 +08:00
commit 90a5cc7cb6
6772 changed files with 2837787 additions and 0 deletions

modules/ts/CMakeLists.txt

@@ -0,0 +1,43 @@
set(the_description "The ts module")
if(NOT BUILD_opencv_ts AND NOT BUILD_TESTS AND NOT BUILD_PERF_TESTS)
ocv_module_disable(ts)
endif()
set(OPENCV_MODULE_TYPE STATIC)
set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
if(WINRT)
# WINRT doesn't have access to environment variables,
# so add the corresponding macros during the CMake run
add_env_definitions(OPENCV_TEST_DATA_PATH)
add_env_definitions(OPENCV_PERF_VALIDATION_DIR)
endif()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
ocv_add_module(ts INTERNAL opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui)
ocv_glob_module_sources()
ocv_module_include_directories()
ocv_create_module()
# generate config file
set(OPENCV_TESTS_CONFIG_FILE "${CMAKE_BINARY_DIR}/opencv_tests_config.hpp")
set(OPENCV_TESTS_CONFIG_STR "")
if(CMAKE_INSTALL_PREFIX)
set(OPENCV_TESTS_CONFIG_STR "${OPENCV_TESTS_CONFIG_STR}
#define OPENCV_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\"
")
endif()
if(OPENCV_TEST_DATA_INSTALL_PATH)
set(OPENCV_TESTS_CONFIG_STR "${OPENCV_TESTS_CONFIG_STR}
#define OPENCV_TEST_DATA_INSTALL_PATH \"${OPENCV_TEST_DATA_INSTALL_PATH}\"
")
endif()
if(EXISTS "${OPENCV_TESTS_CONFIG_FILE}")
file(READ "${OPENCV_TESTS_CONFIG_FILE}" __content)
endif()
if(NOT OPENCV_TESTS_CONFIG_STR STREQUAL "${__content}")
file(WRITE "${OPENCV_TESTS_CONFIG_FILE}" "${OPENCV_TESTS_CONFIG_STR}")
endif()
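# Note: the read/compare/write sequence above rewrites opencv_tests_config.hpp
# only when its content actually changes, so an unchanged configuration does not
# trigger needless rebuilds. The generated header is just a pair of defines; with
# hypothetical values CMAKE_INSTALL_PREFIX=/usr/local and
# OPENCV_TEST_DATA_INSTALL_PATH=share/OpenCV/testdata it would contain:
#
#   #define OPENCV_INSTALL_PREFIX "/usr/local"
#   #define OPENCV_TEST_DATA_INSTALL_PATH "share/OpenCV/testdata"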

modules/ts/include/opencv2/ts.hpp

@@ -0,0 +1,959 @@
#ifndef OPENCV_TS_HPP
#define OPENCV_TS_HPP
#ifndef __OPENCV_TESTS
#define __OPENCV_TESTS 1
#endif
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/utils/trace.hpp"
#include "opencv2/core/hal/hal.hpp"
#include <stdarg.h> // for va_list
#include "cvconfig.h"
#include <cmath>
#include <vector>
#include <list>
#include <map>
#include <queue>
#include <string>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <cstdio>
#include <iterator>
#include <limits>
#include <algorithm>
#ifndef OPENCV_32BIT_CONFIGURATION
# if defined(INTPTR_MAX) && defined(INT32_MAX) && INTPTR_MAX == INT32_MAX
# define OPENCV_32BIT_CONFIGURATION 1
# elif defined(_WIN32) && !defined(_WIN64)
# define OPENCV_32BIT_CONFIGURATION 1
# endif
#else
# if OPENCV_32BIT_CONFIGURATION == 0
# undef OPENCV_32BIT_CONFIGURATION
# endif
#endif
// Most OpenCV tests fit into a 200MB limit, but some tests do not:
// Note: due to memory fragmentation, real limits are usually 20-25% lower (400MB of memory usage falls into the mem_1gb class)
#define CV_TEST_TAG_MEMORY_512MB "mem_512mb" // used memory: 200..512Mb - enabled by default
#define CV_TEST_TAG_MEMORY_1GB "mem_1gb" // used memory: 512Mb..1Gb - enabled by default
#define CV_TEST_TAG_MEMORY_2GB "mem_2gb" // used memory: 1..2Gb - enabled by default on 64-bit configuration (32-bit - disabled)
#define CV_TEST_TAG_MEMORY_6GB "mem_6gb" // used memory: 2..6Gb - disabled by default
#define CV_TEST_TAG_MEMORY_14GB "mem_14gb" // used memory: 6..14Gb - disabled by default
// Large / huge video streams or complex workloads
#define CV_TEST_TAG_LONG "long" // 5+ seconds on modern desktop machine (single thread)
#define CV_TEST_TAG_VERYLONG "verylong" // 20+ seconds on modern desktop machine (single thread)
// Large / huge video streams or complex workloads for debug builds
#define CV_TEST_TAG_DEBUG_LONG "debug_long" // 10+ seconds on modern desktop machine (single thread)
#define CV_TEST_TAG_DEBUG_VERYLONG "debug_verylong" // 40+ seconds on modern desktop machine (single thread)
// Used to skip processing of high-resolution images under instrumentation tools (valgrind/coverage/sanitizers).
// Running lower-resolution (VGA: 640x480) tests is enough there.
#define CV_TEST_TAG_SIZE_HD "size_hd" // 720p+, enabled
#define CV_TEST_TAG_SIZE_FULLHD "size_fullhd" // 1080p+, enabled (disable these tests for valgrind/coverage run)
#define CV_TEST_TAG_SIZE_4K "size_4k" // 2160p+, enabled (disable these tests for valgrind/coverage run)
// Other misc test tags
#define CV_TEST_TAG_TYPE_64F "type_64f" // CV_64F, enabled (disable these tests on low power embedded devices)
// Kernel-based image processing
#define CV_TEST_TAG_FILTER_SMALL "filter_small" // Filtering with kernels <= 3x3
#define CV_TEST_TAG_FILTER_MEDIUM "filter_medium" // Filtering with kernels: 3x3 < kernel <= 5x5
#define CV_TEST_TAG_FILTER_LARGE "filter_large" // Filtering with kernels: 5x5 < kernel <= 9x9
#define CV_TEST_TAG_FILTER_HUGE "filter_huge" // Filtering with kernels: > 9x9
// Other tests categories
#define CV_TEST_TAG_OPENCL "opencl" // Tests with OpenCL
#ifdef WINRT
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
#endif
#ifdef _MSC_VER
#pragma warning( disable: 4503 ) // decorated name length exceeded, name was truncated
#endif
#define GTEST_DONT_DEFINE_FAIL 0
#define GTEST_DONT_DEFINE_SUCCEED 0
#define GTEST_DONT_DEFINE_ASSERT_EQ 0
#define GTEST_DONT_DEFINE_ASSERT_NE 0
#define GTEST_DONT_DEFINE_ASSERT_LE 0
#define GTEST_DONT_DEFINE_ASSERT_LT 0
#define GTEST_DONT_DEFINE_ASSERT_GE 0
#define GTEST_DONT_DEFINE_ASSERT_GT 0
#define GTEST_DONT_DEFINE_TEST 0
#ifndef GTEST_LANG_CXX11
#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && !(_MSVC_LANG < 201103))
# define GTEST_LANG_CXX11 1
# define GTEST_HAS_TR1_TUPLE 0
# define GTEST_HAS_COMBINE 1
# endif
#endif
#if defined(__OPENCV_BUILD) && defined(__clang__)
#pragma clang diagnostic ignored "-Winconsistent-missing-override"
#endif
#if defined(__OPENCV_BUILD) && defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv2/ts/ts_gtest.h"
#if defined(__OPENCV_BUILD) && defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#include "opencv2/ts/ts_ext.hpp"
#ifndef GTEST_USES_SIMPLE_RE
# define GTEST_USES_SIMPLE_RE 0
#endif
#ifndef GTEST_USES_POSIX_RE
# define GTEST_USES_POSIX_RE 0
#endif
#define PARAM_TEST_CASE(name, ...) struct name : testing::TestWithParam< testing::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) testing::get< k >(GetParam())
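// Usage sketch for the two macros above (illustrative only; the fixture name
// and parameters are hypothetical):
//
//   PARAM_TEST_CASE(SizeDepthFixture, cv::Size, int) { };
//
//   TEST_P(SizeDepthFixture, Smoke)
//   {
//       cv::Size sz = GET_PARAM(0);               // first tuple element
//       int depth   = GET_PARAM(1);               // second tuple element
//       cv::Mat m(sz, CV_MAKETYPE(depth, 1));
//       ASSERT_EQ(sz, m.size());
//   }
//
//   INSTANTIATE_TEST_CASE_P(Sanity, SizeDepthFixture,
//       testing::Combine(testing::Values(cv::Size(64, 64)),
//                        testing::Values(CV_8U, CV_32F)));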
namespace cvtest
{
using std::vector;
using std::map;
using std::string;
using std::stringstream;
using std::cout;
using std::cerr;
using std::endl;
using std::min;
using std::max;
using std::numeric_limits;
using std::pair;
using std::make_pair;
using testing::TestWithParam;
using testing::Values;
using testing::ValuesIn;
using testing::Combine;
using cv::Mat;
using cv::Mat_;
using cv::UMat;
using cv::InputArray;
using cv::OutputArray;
using cv::noArray;
using cv::Range;
using cv::Point;
using cv::Rect;
using cv::Size;
using cv::Scalar;
using cv::RNG;
// Tuple helpers from Google Test
using testing::get;
using testing::make_tuple;
using testing::tuple;
using testing::tuple_size;
using testing::tuple_element;
namespace details {
class SkipTestExceptionBase: public cv::Exception
{
public:
SkipTestExceptionBase(bool handlingTags);
SkipTestExceptionBase(const cv::String& message, bool handlingTags);
};
}
class SkipTestException: public details::SkipTestExceptionBase
{
public:
int dummy; // workaround for MacOSX Xcode 7.3 bug (don't make class "empty")
SkipTestException() : details::SkipTestExceptionBase(false), dummy(0) {}
SkipTestException(const cv::String& message) : details::SkipTestExceptionBase(message, false), dummy(0) { }
};
/** Apply a tag to the current test
Automatically applies the corresponding broader tags (for example, 4K => FHD => HD => VGA).
If the tag is in the skip list, a SkipTestException is thrown
*/
void applyTestTag(const std::string& tag);
/** Run postponed checks of the applied test tags
If a tag is in the skip list, a SkipTestException is thrown
*/
void checkTestTags();
void applyTestTag_(const std::string& tag);
static inline void applyTestTag(const std::string& tag1, const std::string& tag2)
{ applyTestTag_(tag1); applyTestTag_(tag2); checkTestTags(); }
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3)
{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); checkTestTags(); }
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); applyTestTag_(tag4); checkTestTags(); }
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4, const std::string& tag5)
{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); applyTestTag_(tag4); applyTestTag_(tag5); checkTestTags(); }
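// Example (illustrative): a test case that allocates about 1GB and processes
// 4K input would tag itself at the start of its body; it is then skipped
// automatically when either tag is in the active skip list:
//
//   applyTestTag(CV_TEST_TAG_MEMORY_1GB, CV_TEST_TAG_SIZE_4K);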
/** Append global skip test tags
*/
void registerGlobalSkipTag(const std::string& skipTag);
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2)
{ registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); }
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3)
{ registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); }
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
{ registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4); }
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
const std::string& tag5)
{
registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4);
registerGlobalSkipTag(tag5);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
const std::string& tag5, const std::string& tag6)
{
registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4);
registerGlobalSkipTag(tag5); registerGlobalSkipTag(tag6);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
const std::string& tag5, const std::string& tag6, const std::string& tag7)
{
registerGlobalSkipTag(tag1); registerGlobalSkipTag(tag2); registerGlobalSkipTag(tag3); registerGlobalSkipTag(tag4);
registerGlobalSkipTag(tag5); registerGlobalSkipTag(tag6); registerGlobalSkipTag(tag7);
}
class TS;
int64 readSeed(const char* str);
void randUni( RNG& rng, Mat& a, const Scalar& param1, const Scalar& param2 );
inline unsigned randInt( RNG& rng )
{
return (unsigned)rng;
}
inline double randReal( RNG& rng )
{
return (double)rng;
}
const char* getTypeName( int type );
int typeByName( const char* type_name );
string vec2str(const string& sep, const int* v, size_t nelems);
inline int clipInt( int val, int min_val, int max_val )
{
if( val < min_val )
val = min_val;
if( val > max_val )
val = max_val;
return val;
}
double getMinVal(int depth);
double getMaxVal(int depth);
Size randomSize(RNG& rng, double maxSizeLog);
void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);
int randomType(RNG& rng, cv::_OutputArray::DepthMask typeMask, int minChannels, int maxChannels);
Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi);
Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi);
void add(const Mat& a, double alpha, const Mat& b, double beta,
Scalar gamma, Mat& c, int ctype, bool calcAbs=false);
void multiply(const Mat& a, const Mat& b, Mat& c, double alpha=1);
void divide(const Mat& a, const Mat& b, Mat& c, double alpha=1);
void convert(const Mat& src, cv::OutputArray dst, int dtype, double alpha=1, double beta=0);
void copy(const Mat& src, Mat& dst, const Mat& mask=Mat(), bool invertMask=false);
void set(Mat& dst, const Scalar& gamma, const Mat& mask=Mat());
// working with multi-channel arrays
void extract( const Mat& a, Mat& plane, int coi );
void insert( const Mat& plane, Mat& a, int coi );
// checks that the array does not have NaNs and/or Infs and all the elements are
// within [min_val,max_val). idx is the index of the first "bad" element.
int check( const Mat& data, double min_val, double max_val, vector<int>* idx );
// modifies values that are close to zero
void patchZeros( Mat& mat, double level );
void transpose(const Mat& src, Mat& dst);
void erode(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
int borderType=0, const Scalar& borderValue=Scalar());
void dilate(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
int borderType=0, const Scalar& borderValue=Scalar());
void filter2D(const Mat& src, Mat& dst, int ddepth, const Mat& kernel,
Point anchor, double delta, int borderType,
const Scalar& borderValue=Scalar());
void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
int borderType, const Scalar& borderValue=Scalar());
Mat calcSobelKernel2D( int dx, int dy, int apertureSize, int origin=0 );
Mat calcLaplaceKernel2D( int aperture_size );
void initUndistortMap( const Mat& a, const Mat& k, const Mat& R, const Mat& new_a, Size sz, Mat& mapx, Mat& mapy, int map_type );
void minMaxLoc(const Mat& src, double* minval, double* maxval,
vector<int>* minloc, vector<int>* maxloc, const Mat& mask=Mat());
double norm(InputArray src, int normType, InputArray mask=noArray());
double norm(InputArray src1, InputArray src2, int normType, InputArray mask=noArray());
Scalar mean(const Mat& src, const Mat& mask=Mat());
double PSNR(InputArray src1, InputArray src2);
bool cmpUlps(const Mat& data, const Mat& refdata, int expMaxDiff, double* realMaxDiff, vector<int>* idx);
// compares two arrays. max_diff is the maximum actual difference,
// success_err_level is the maximum allowed difference, idx is the index of the first
// element for which the difference is > success_err_level
// (or the index of the element with the maximum difference)
int cmpEps( const Mat& data, const Mat& refdata, double* max_diff,
double success_err_level, vector<int>* idx,
bool element_wise_relative_error );
// a wrapper for the previous function. In case of error it prints the message to the log file.
int cmpEps2( TS* ts, const Mat& data, const Mat& refdata, double success_err_level,
bool element_wise_relative_error, const char* desc );
int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
double eps, const char* param_name );
void logicOp(const Mat& src1, const Mat& src2, Mat& dst, char c);
void logicOp(const Mat& src, const Scalar& s, Mat& dst, char c);
void min(const Mat& src1, const Mat& src2, Mat& dst);
void min(const Mat& src, double s, Mat& dst);
void max(const Mat& src1, const Mat& src2, Mat& dst);
void max(const Mat& src, double s, Mat& dst);
void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop);
void compare(const Mat& src, double s, Mat& dst, int cmpop);
void gemm(const Mat& src1, const Mat& src2, double alpha,
const Mat& src3, double beta, Mat& dst, int flags);
void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& shift );
double crossCorr(const Mat& src1, const Mat& src2);
void threshold( const Mat& src, Mat& dst, double thresh, double maxval, int thresh_type );
void minMaxIdx( InputArray _img, double* minVal, double* maxVal,
Point* minLoc, Point* maxLoc, InputArray _mask );
struct MatInfo
{
MatInfo(const Mat& _m) : m(&_m) {}
const Mat* m;
};
std::ostream& operator << (std::ostream& out, const MatInfo& m);
struct MatComparator
{
public:
MatComparator(double maxdiff, int context);
::testing::AssertionResult operator()(const char* expr1, const char* expr2,
const Mat& m1, const Mat& m2);
double maxdiff;
double realmaxdiff;
vector<int> loc0;
int context;
};
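// Usage sketch (illustrative): MatComparator plugs into Google Test's
// predicate-format assertions; maxdiff is the allowed difference, and context
// presumably controls how much surrounding data is reported on failure:
//
//   EXPECT_PRED_FORMAT2(cvtest::MatComparator(1e-5, 0), expected, actual);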
class BaseTest;
class TS;
class BaseTest
{
public:
// constructor(s) and destructor
BaseTest();
virtual ~BaseTest();
// the main procedure of the test
virtual void run( int start_from );
// the wrapper for run that cares of exceptions
virtual void safe_run( int start_from=0 );
const string& get_name() const { return name; }
// returns true if and only if the different test cases do not depend on each other
// (so that the test system can jump straight to a problematic test case)
virtual bool can_do_fast_forward();
// deallocates all the memory.
// called by init() (before initialization) and by the destructor
virtual void clear();
protected:
int test_case_count; // the total number of test cases
// read test params
virtual int read_params( const cv::FileStorage& fs );
// returns the number of tests, or -1 if it is unknown a priori
virtual int get_test_case_count();
// prepares data for the next test case. rng seed is updated by the function
virtual int prepare_test_case( int test_case_idx );
// checks if the test output is valid and accurate
virtual int validate_test_results( int test_case_idx );
// calls the tested function. the method is called from run_test_case()
virtual void run_func(); // runs tested func(s)
// updates progress bar
virtual int update_progress( int progress, int test_case_idx, int count, double dt );
// dump test case input parameters
virtual void dump_test_case(int test_case_idx, std::ostream* out);
// finds test parameter
cv::FileNode find_param( const cv::FileStorage& fs, const char* param_name );
// name of the test (it is possible to locate a test by its name)
string name;
// pointer to the system that includes the test
TS* ts;
};
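// Minimal subclass sketch (illustrative; the check itself is hypothetical):
//
//   class MySmokeTest : public cvtest::BaseTest
//   {
//   public:
//       MySmokeTest() { name = "my_smoke_test"; }
//   protected:
//       void run(int /*start_from*/) CV_OVERRIDE
//       {
//           cv::Mat m(4, 4, CV_8U, cv::Scalar(7));
//           if (cv::countNonZero(m != 7) != 0)
//               ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
//       }
//   };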
/*****************************************************************************************\
* Information about a failed test *
\*****************************************************************************************/
struct TestInfo
{
TestInfo();
// pointer to the test
BaseTest* test;
// failure code (TS::FAIL_*)
int code;
// seed value right before the data for the failed test case is prepared.
uint64 rng_seed;
// seed value right before running the test
uint64 rng_seed0;
// index of test case, can be then passed to BaseTest::proceed_to_test_case()
int test_case_idx;
};
/*****************************************************************************************\
* Base Class for test system *
\*****************************************************************************************/
// common parameters:
struct TSParams
{
TSParams();
// RNG seed, passed to and updated by every test executed.
uint64 rng_seed;
// whether to use IPP, MKL etc. or not
bool use_optimized;
// extensivity of the tests, scale factor for test_case_count
double test_case_count_scale;
};
class TS
{
TS();
virtual ~TS();
public:
enum
{
NUL=0,
SUMMARY_IDX=0,
SUMMARY=1 << SUMMARY_IDX,
LOG_IDX=1,
LOG=1 << LOG_IDX,
CSV_IDX=2,
CSV=1 << CSV_IDX,
CONSOLE_IDX=3,
CONSOLE=1 << CONSOLE_IDX,
MAX_IDX=4
};
static TS* ptr();
// initialize test system before running the first test
virtual void init( const string& modulename );
// low-level printing functions that are used by individual tests and by the system itself
virtual void printf( int streams, const char* fmt, ... );
virtual void vprintf( int streams, const char* fmt, va_list arglist );
// updates the context: current test, test case, rng state
virtual void update_context( BaseTest* test, int test_case_idx, bool update_ts_context );
const TestInfo* get_current_test_info() { return &current_test_info; }
// sets information about a failed test
virtual void set_failed_test_info( int fail_code );
virtual void set_gtest_status();
// test error codes
enum FailureCode
{
// everything is Ok
OK=0,
// generic error: stub value to be used
// temporarily if the error's cause is unknown
FAIL_GENERIC=-1,
// the test is missing some essential data to proceed further
FAIL_MISSING_TEST_DATA=-2,
// the tested function raised an error via cxcore error handler
FAIL_ERROR_IN_CALLED_FUNC=-3,
// an exception has been raised;
// for memory and arithmetic exceptions
// there are two specialized codes (see below...)
FAIL_EXCEPTION=-4,
// a memory exception
// (access violation, access to a missing page, stack overflow, etc.)
FAIL_MEMORY_EXCEPTION=-5,
// arithmetic exception (overflow, division by zero etc.)
FAIL_ARITHM_EXCEPTION=-6,
// the tested function corrupted memory (no exception has been raised)
FAIL_MEMORY_CORRUPTION_BEGIN=-7,
FAIL_MEMORY_CORRUPTION_END=-8,
// the tested function (or the test itself) does not deallocate some memory
FAIL_MEMORY_LEAK=-9,
// the tested function returned an invalid object, e.g. a matrix containing NaNs,
// or a structure with NULL or out-of-range fields (while it should not)
FAIL_INVALID_OUTPUT=-10,
// the tested function returned a valid object, but it does not match
// the original (or test-produced) object
FAIL_MISMATCH=-11,
// the tested function returned a valid object (a single number or numerical array),
// but it differs too much from the original (or test-produced) object
FAIL_BAD_ACCURACY=-12,
// the tested function hung. Sometimes this can be detected by an unexpectedly long
// processing time (in this case there should be a way to interrupt such a function)
FAIL_HANG=-13,
// unexpected response when passing bad arguments to the tested function
// (the function crashed, proceeded successfully (while it should not), or returned
// an error code different from the expected one)
FAIL_BAD_ARG_CHECK=-14,
// the test data (in whole or for the particular test case) is invalid
FAIL_INVALID_TEST_DATA=-15,
// the test has been skipped because it is not in the selected subset of tests to run,
// because it has already been run within the same session with the same parameters,
// or for some other reason; this is not considered an error.
// Normally TS::run() (or overridden method in the derived class) takes care of what
// needs to be run, so this code should not occur.
SKIPPED=1
};
// get RNG to generate random input data for a test
RNG& get_rng() { return rng; }
// returns the current error code
TS::FailureCode get_err_code() { return TS::FailureCode(current_test_info.code); }
// returns the test extensivity scale
double get_test_case_count_scale() { return params.test_case_count_scale; }
const string& get_data_path() const { return data_path; }
// returns textual description of failure code
static string str_from_code( const TS::FailureCode code );
std::vector<std::string> data_search_path;
std::vector<std::string> data_search_subdir;
protected:
// these are allocated within a test to try to keep them valid in case of stack corruption
RNG rng;
// information about the current test
TestInfo current_test_info;
// the path to data files used by tests
string data_path;
TSParams params;
std::string output_buf[MAX_IDX];
};
/*****************************************************************************************\
* Subclass of BaseTest for testing functions that process dense arrays *
\*****************************************************************************************/
class ArrayTest : public BaseTest
{
public:
// constructor(s) and destructor
ArrayTest();
virtual ~ArrayTest();
virtual void clear() CV_OVERRIDE;
protected:
virtual int read_params( const cv::FileStorage& fs ) CV_OVERRIDE;
virtual int prepare_test_case( int test_case_idx ) CV_OVERRIDE;
virtual int validate_test_results( int test_case_idx ) CV_OVERRIDE;
virtual void prepare_to_validation( int test_case_idx );
virtual void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
virtual void fill_array( int test_case_idx, int i, int j, Mat& arr );
virtual void get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high );
virtual double get_success_error_level( int test_case_idx, int i, int j );
bool cvmat_allowed;
bool iplimage_allowed;
bool optional_mask;
bool element_wise_relative_error;
int min_log_array_size;
int max_log_array_size;
enum { INPUT, INPUT_OUTPUT, OUTPUT, REF_INPUT_OUTPUT, REF_OUTPUT, TEMP, MASK, MAX_ARR };
vector<vector<void*> > test_array;
vector<vector<Mat> > test_mat;
float buf[4];
};
class BadArgTest : public BaseTest
{
public:
// constructor(s) and destructor
BadArgTest();
virtual ~BadArgTest();
protected:
virtual int run_test_case( int expected_code, const string& descr );
virtual void run_func(void) CV_OVERRIDE = 0;
int test_case_idx;
template<class F>
int run_test_case( int expected_code, const string& _descr, F f)
{
int errcount = 0;
bool thrown = false;
const char* descr = _descr.c_str();
try
{
f();
}
catch(const cv::Exception& e)
{
thrown = true;
if( e.code != expected_code && e.code != cv::Error::StsAssert && e.code != cv::Error::StsError )
{
ts->printf(TS::LOG, "%s (test case #%d): the error code %d is different from the expected %d\n",
descr, test_case_idx, e.code, expected_code);
errcount = 1;
}
}
catch(...)
{
thrown = true;
ts->printf(TS::LOG, "%s (test case #%d): unknown exception was thrown (the function has likely crashed)\n",
descr, test_case_idx);
errcount = 1;
}
if(!thrown)
{
ts->printf(TS::LOG, "%s (test case #%d): no expected exception was thrown\n",
descr, test_case_idx);
errcount = 1;
}
test_case_idx++;
return errcount;
}
};
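// Usage sketch (illustrative): from a derived test, pass the expected
// cv::Exception code, a description, and the offending call as a functor:
//
//   errcount += run_test_case(cv::Error::StsBadArg, "empty input",
//                             [&]() { /* call the tested function with bad arguments */ });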
extern uint64 param_seed;
struct DefaultRngAuto
{
const uint64 old_state;
DefaultRngAuto() : old_state(cv::theRNG().state) { cv::theRNG().state = cvtest::param_seed; }
~DefaultRngAuto() { cv::theRNG().state = old_state; }
DefaultRngAuto& operator=(const DefaultRngAuto&);
};
// test images generation functions
void fillGradient(Mat& img, int delta = 5);
void smoothBorder(Mat& img, const Scalar& color, int delta = 3);
// Utility functions
void addDataSearchPath(const std::string& path);
void addDataSearchSubDirectory(const std::string& subdir);
/*! @brief Try to find requested data file
Search directories:
0. TS::data_search_path (search sub-directories are not used)
1. OPENCV_TEST_DATA_PATH environment variable
2. One of these:
a. OpenCV testdata based on build location: "./" + "share/OpenCV/testdata"
b. OpenCV testdata at install location: CMAKE_INSTALL_PREFIX + "share/OpenCV/testdata"
Search sub-directories:
- addDataSearchSubDirectory()
- modulename from TS::init()
*/
std::string findDataFile(const std::string& relative_path, bool required = true);
/*! @brief Try to find requested data directory
@sa findDataFile
*/
std::string findDataDirectory(const std::string& relative_path, bool required = true);
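// Example (illustrative; the relative path follows the usual testdata layout):
//
//   std::string fname = cvtest::findDataFile("cv/shared/lena.png");
//   cv::Mat img = cv::imread(fname);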
// Test definitions
class SystemInfoCollector : public testing::EmptyTestEventListener
{
private:
virtual void OnTestProgramStart(const testing::UnitTest&);
};
#ifndef __CV_TEST_EXEC_ARGS
#if defined(_MSC_VER) && (_MSC_VER <= 1400)
#define __CV_TEST_EXEC_ARGS(...) \
while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
#else
#define __CV_TEST_EXEC_ARGS(...) \
__VA_ARGS__;
#endif
#endif
void parseCustomOptions(int argc, char **argv);
#define CV_TEST_INIT0_NOOP (void)0
#define CV_TEST_MAIN(resourcesubdir, ...) CV_TEST_MAIN_EX(resourcesubdir, NOOP, __VA_ARGS__)
#define CV_TEST_MAIN_EX(resourcesubdir, INIT0, ...) \
int main(int argc, char **argv) \
{ \
CV_TRACE_FUNCTION(); \
{ CV_TRACE_REGION("INIT"); \
using namespace cvtest; using namespace opencv_test; \
TS* ts = TS::ptr(); \
ts->init(resourcesubdir); \
__CV_TEST_EXEC_ARGS(CV_TEST_INIT0_ ## INIT0) \
::testing::InitGoogleTest(&argc, argv); \
::testing::UnitTest::GetInstance()->listeners().Append(new SystemInfoCollector); \
__CV_TEST_EXEC_ARGS(__VA_ARGS__) \
parseCustomOptions(argc, argv); \
} \
return RUN_ALL_TESTS(); \
}
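// Typical use at the bottom of a module's test main file (the resource
// sub-directory name is illustrative):
//
//   CV_TEST_MAIN("cv")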
// This usually only makes sense in perf tests with several implementations,
// some of which are not available.
#define CV_TEST_FAIL_NO_IMPL() do { \
::testing::Test::RecordProperty("custom_status", "noimpl"); \
FAIL() << "No equivalent implementation."; \
} while (0)
} //namespace cvtest
#include "opencv2/ts/ts_perf.hpp"
namespace cvtest {
using perf::MatDepth;
using perf::MatType;
}
#ifdef WINRT
#ifndef __FSTREAM_EMULATED__
#define __FSTREAM_EMULATED__
#include <stdlib.h>
#include <fstream>
#include <sstream>
#undef ifstream
#undef ofstream
#define ifstream ifstream_emulated
#define ofstream ofstream_emulated
namespace std {
class ifstream : public stringstream
{
FILE* f;
public:
ifstream(const char* filename, ios_base::openmode mode = ios_base::in)
: f(NULL)
{
string modeStr("r");
printf("Open file (read): %s\n", filename);
if (mode & ios_base::binary)
modeStr += "b";
f = fopen(filename, modeStr.c_str());
if (f == NULL)
{
printf("Can't open file: %s\n", filename);
return;
}
fseek(f, 0, SEEK_END);
size_t sz = ftell(f);
if (sz > 0)
{
char* buf = (char*) malloc(sz);
fseek(f, 0, SEEK_SET);
if (fread(buf, 1, sz, f) == sz)
{
this->str(std::string(buf, sz));
}
free(buf);
}
}
~ifstream() { close(); }
bool is_open() const { return f != NULL; }
void close()
{
if (f)
fclose(f);
f = NULL;
this->str("");
}
};
class ofstream : public stringstream
{
FILE* f;
public:
ofstream(const char* filename, ios_base::openmode mode = ios_base::out)
: f(NULL)
{
open(filename, mode);
}
~ofstream() { close(); }
void open(const char* filename, ios_base::openmode mode = ios_base::out)
{
string modeStr("w+");
if (mode & ios_base::trunc)
modeStr = "w";
if (mode & ios_base::binary)
modeStr += "b";
f = fopen(filename, modeStr.c_str());
printf("Open file (write): %s\n", filename);
if (f == NULL)
{
printf("Can't open file (write): %s\n", filename);
return;
}
}
bool is_open() const { return f != NULL; }
void close()
{
if (f)
{
fwrite(reinterpret_cast<const char *>(this->str().c_str()), this->str().size(), 1, f);
fclose(f);
}
f = NULL;
this->str("");
}
};
} // namespace std
#endif // __FSTREAM_EMULATED__
#endif // WINRT
namespace opencv_test {
using namespace cvtest;
using namespace cv;
#ifdef CV_CXX11
#define CVTEST_GUARD_SYMBOL(name) \
class required_namespace_specification_here_for_symbol_ ## name {}; \
using name = required_namespace_specification_here_for_symbol_ ## name;
#else
#define CVTEST_GUARD_SYMBOL(name) /* nothing */
#endif
CVTEST_GUARD_SYMBOL(norm)
CVTEST_GUARD_SYMBOL(add)
CVTEST_GUARD_SYMBOL(multiply)
CVTEST_GUARD_SYMBOL(divide)
CVTEST_GUARD_SYMBOL(transpose)
CVTEST_GUARD_SYMBOL(copyMakeBorder)
CVTEST_GUARD_SYMBOL(filter2D)
CVTEST_GUARD_SYMBOL(compare)
CVTEST_GUARD_SYMBOL(minMaxIdx)
CVTEST_GUARD_SYMBOL(threshold)
extern bool required_opencv_test_namespace; // compilation check for non-refactored tests
}
#endif // OPENCV_TS_HPP

modules/ts/include/opencv2/ts/cuda_perf.hpp

@@ -0,0 +1,125 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_PERF_UTILITY_HPP
#define OPENCV_CUDA_PERF_UTILITY_HPP
#include "opencv2/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
namespace perf
{
#define ALL_BORDER_MODES BorderMode::all()
#define ALL_INTERPOLATIONS Interpolation::all()
CV_ENUM(BorderMode, BORDER_REFLECT101, BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT, BORDER_WRAP)
CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA)
CV_ENUM(NormType, NORM_INF, NORM_L1, NORM_L2, NORM_HAMMING, NORM_MINMAX)
enum { Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4 };
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
#define CUDA_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
#define CUDA_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
#define GET_PARAM(k) testing::get< k >(GetParam())
#define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< testing::tuple< __VA_ARGS__ > > name
#define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name
DEF_PARAM_TEST_1(Sz, cv::Size);
typedef perf::Size_MatType Sz_Type;
DEF_PARAM_TEST(Sz_Depth, cv::Size, perf::MatDepth);
DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, perf::MatDepth, MatCn);
#define CUDA_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
#define FAIL_NO_CPU() FAIL() << "No such CPU implementation analogy"
#define CUDA_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat gpu_##mat(mat); \
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
} while(0)
#define CPU_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat cpu_##mat(mat); \
SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
} while(0)
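// Usage sketch (illustrative; the measured operation is hypothetical, and
// PERF_TEST_P, declare, and TEST_CYCLE come from ts_perf.hpp):
//
//   PERF_TEST_P(Sz_Depth, Transpose,
//               testing::Combine(CUDA_TYPICAL_MAT_SIZES,
//                                testing::Values(perf::MatDepth(CV_8U),
//                                                perf::MatDepth(CV_32F))))
//   {
//       const cv::Size size = GET_PARAM(0);
//       const int depth = GET_PARAM(1);
//       cv::Mat src(size, depth), dst;
//       declare.in(src, WARMUP_RNG);
//       TEST_CYCLE() cv::transpose(src, dst);
//       CPU_SANITY_CHECK(dst);
//   }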
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
struct CvtColorInfo
{
int scn;
int dcn;
int code;
CvtColorInfo() {}
explicit CvtColorInfo(int scn_, int dcn_, int code_) : scn(scn_), dcn(dcn_), code(code_) {}
};
void PrintTo(const CvtColorInfo& info, std::ostream* os);
void printCudaInfo();
void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors = cv::noArray());
#ifdef HAVE_CUDA
#define CV_PERF_TEST_CUDA_MAIN(modulename) \
int main(int argc, char **argv)\
{\
const char * impls[] = { "cuda", "plain" };\
CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, perf::printCudaInfo())\
}
#else
#define CV_PERF_TEST_CUDA_MAIN(modulename) \
int main(int argc, char **argv)\
{\
const char * plain_only[] = { "plain" };\
CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only)\
}
#endif
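// Typical use in a CUDA perf module's main file (module name is illustrative):
//
//   CV_PERF_TEST_CUDA_MAIN(cudaarithm)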
}
#endif // OPENCV_CUDA_PERF_UTILITY_HPP

modules/ts/include/opencv2/ts/cuda_test.hpp

@@ -0,0 +1,369 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_CUDA_TEST_UTILITY_HPP
#define OPENCV_CUDA_TEST_UTILITY_HPP
#include "opencv2/ts.hpp"
#include <stdexcept>
#include "opencv2/core/cuda.hpp"
namespace cvtest
{
//////////////////////////////////////////////////////////////////////
// random generators
int randomInt(int minVal, int maxVal);
double randomDouble(double minVal, double maxVal);
cv::Size randomSize(int minVal, int maxVal);
cv::Scalar randomScalar(double minVal, double maxVal);
cv::Mat randomMat(cv::Size size, int type, double minVal = 0.0, double maxVal = 255.0);
//////////////////////////////////////////////////////////////////////
// GpuMat create
cv::cuda::GpuMat createMat(cv::Size size, int type, bool useRoi = false);
cv::cuda::GpuMat loadMat(const cv::Mat& m, bool useRoi = false);
//////////////////////////////////////////////////////////////////////
// Image load
//! read image from testdata folder
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
//! read image from testdata folder and convert it to specified type
cv::Mat readImageType(const std::string& fname, int type);
//////////////////////////////////////////////////////////////////////
// Gpu devices
//! returns true if the device supports the specified feature and the gpu module was built with support for it.
bool supportFeature(const cv::cuda::DeviceInfo& info, cv::cuda::FeatureSet feature);
class DeviceManager
{
public:
static DeviceManager& instance();
void load(int i);
void loadAll();
const std::vector<cv::cuda::DeviceInfo>& values() const { return devices_; }
private:
std::vector<cv::cuda::DeviceInfo> devices_;
};
#define ALL_DEVICES testing::ValuesIn(cvtest::DeviceManager::instance().values())
//////////////////////////////////////////////////////////////////////
// Additional assertion
void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat());
cv::Mat getMat(cv::InputArray arr);
testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1, cv::InputArray m2, double eps);
#undef EXPECT_MAT_NEAR
#define EXPECT_MAT_NEAR(m1, m2, eps) EXPECT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
#define ASSERT_MAT_NEAR(m1, m2, eps) ASSERT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
#define EXPECT_SCALAR_NEAR(s1, s2, eps) \
{ \
EXPECT_NEAR(s1[0], s2[0], eps); \
EXPECT_NEAR(s1[1], s2[1], eps); \
EXPECT_NEAR(s1[2], s2[2], eps); \
EXPECT_NEAR(s1[3], s2[3], eps); \
}
#define ASSERT_SCALAR_NEAR(s1, s2, eps) \
{ \
ASSERT_NEAR(s1[0], s2[0], eps); \
ASSERT_NEAR(s1[1], s2[1], eps); \
ASSERT_NEAR(s1[2], s2[2], eps); \
ASSERT_NEAR(s1[3], s2[3], eps); \
}
#define EXPECT_POINT2_NEAR(p1, p2, eps) \
{ \
EXPECT_NEAR(p1.x, p2.x, eps); \
EXPECT_NEAR(p1.y, p2.y, eps); \
}
#define ASSERT_POINT2_NEAR(p1, p2, eps) \
{ \
ASSERT_NEAR(p1.x, p2.x, eps); \
ASSERT_NEAR(p1.y, p2.y, eps); \
}
#define EXPECT_POINT3_NEAR(p1, p2, eps) \
{ \
EXPECT_NEAR(p1.x, p2.x, eps); \
EXPECT_NEAR(p1.y, p2.y, eps); \
EXPECT_NEAR(p1.z, p2.z, eps); \
}
#define ASSERT_POINT3_NEAR(p1, p2, eps) \
{ \
ASSERT_NEAR(p1.x, p2.x, eps); \
ASSERT_NEAR(p1.y, p2.y, eps); \
ASSERT_NEAR(p1.z, p2.z, eps); \
}
double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
#undef EXPECT_MAT_SIMILAR
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
EXPECT_LE(checkSimilarity(mat1, mat2), eps); \
}
#define ASSERT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
ASSERT_LE(checkSimilarity(mat1, mat2), eps); \
}
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
#define CUDA_TEST_P(test_case_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public test_case_name { \
public: \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
virtual void TestBody(); \
private: \
void UnsafeTestBody(); \
static int AddToRegistry() { \
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
GetTestCasePatternHolder<test_case_name>(\
#test_case_name, \
::testing::internal::CodeLocation(\
__FILE__, __LINE__))->AddTestPattern(\
#test_case_name, \
#test_name, \
new ::testing::internal::TestMetaFactory< \
GTEST_TEST_CLASS_NAME_(\
test_case_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_case_name, \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() \
{ \
try \
{ \
UnsafeTestBody(); \
} \
catch (...) \
{ \
cv::cuda::resetDevice(); \
throw; \
} \
} \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::UnsafeTestBody()
#define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
// Depth
using perf::MatDepth;
#define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
#define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16S), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32F), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
// Type
using perf::MatType;
//! return vector with types from specified range.
std::vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end);
//! return vector with all types (depth: CV_8U-CV_64F, channels: 1-4).
const std::vector<MatType>& all_types();
#define ALL_TYPES testing::ValuesIn(all_types())
#define TYPES(depth_start, depth_end, cn_start, cn_end) testing::ValuesIn(types(depth_start, depth_end, cn_start, cn_end))
// ROI
class UseRoi
{
public:
inline UseRoi(bool val = false) : val_(val) {}
inline operator bool() const { return val_; }
private:
bool val_;
};
void PrintTo(const UseRoi& useRoi, std::ostream* os);
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
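// Usage sketch (illustrative): a typical CUDA accuracy test combines a device,
// a size, and a ROI flag, and compares against a CPU reference:
//
//   PARAM_TEST_CASE(MyCudaOp, cv::cuda::DeviceInfo, cv::Size, UseRoi) { };
//
//   CUDA_TEST_P(MyCudaOp, Accuracy)
//   {
//       cv::cuda::setDevice(GET_PARAM(0).deviceID());
//       cv::Mat src = randomMat(GET_PARAM(1), CV_8UC1);
//       cv::cuda::GpuMat d_src = loadMat(src, GET_PARAM(2));
//       // ... run the CUDA routine on d_src, download, and compare, e.g.:
//       // EXPECT_MAT_NEAR(cpu_dst, gpu_dst, 1e-5);
//   }
//
//   INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MyCudaOp,
//       testing::Combine(ALL_DEVICES, DIFFERENT_SIZES, WHOLE_SUBMAT));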
// Direct/Inverse
class Inverse
{
public:
inline Inverse(bool val = false) : val_(val) {}
inline operator bool() const { return val_; }
private:
bool val_;
};
void PrintTo(const Inverse& inverse, std::ostream* os);
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
// Param class
#define IMPLEMENT_PARAM_CLASS(name, type) \
class name \
{ \
public: \
name ( type arg = type ()) : val_(arg) {} \
operator type () const {return val_;} \
private: \
type val_; \
}; \
inline void PrintTo( name param, std::ostream* os) \
{ \
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
}
IMPLEMENT_PARAM_CLASS(Channels, int)
#define ALL_CHANNELS testing::Values(Channels(1), Channels(2), Channels(3), Channels(4))
#define IMAGE_CHANNELS testing::Values(Channels(1), Channels(3), Channels(4))
// Flags and enums
CV_ENUM(NormCode, NORM_INF, NORM_L1, NORM_L2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX)
CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA)
CV_ENUM(BorderType, BORDER_REFLECT101, BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT, BORDER_WRAP)
#define ALL_BORDER_TYPES testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT), BorderType(cv::BORDER_WRAP))
CV_FLAGS(WarpFlags, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, WARP_INVERSE_MAP)
//////////////////////////////////////////////////////////////////////
// Features2D
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
#define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual)
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches);
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const cv::Mat& image);
void showDiff(cv::InputArray gold, cv::InputArray actual, double eps);
void parseCudaDeviceOptions(int argc, char **argv);
void printCudaInfo();
}
namespace cv { namespace cuda
{
void PrintTo(const DeviceInfo& info, std::ostream* os);
}}
#ifdef HAVE_CUDA
#define CV_TEST_INIT0_CUDA cvtest::parseCudaDeviceOptions(argc, argv), cvtest::printCudaInfo(), cv::setUseOptimized(false)
#define CV_CUDA_TEST_MAIN(resourcesubdir, ...) \
CV_TEST_MAIN_EX(resourcesubdir, CUDA, __VA_ARGS__)
#else // HAVE_CUDA
#define CV_CUDA_TEST_MAIN(resourcesubdir) \
int main() \
{ \
printf("OpenCV was built without CUDA support\n"); \
return 0; \
}
#endif // HAVE_CUDA
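// Typical use in a CUDA test module's main file (the resource sub-directory
// name is illustrative):
//
//   CV_CUDA_TEST_MAIN("cv")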
#endif // OPENCV_CUDA_TEST_UTILITY_HPP

modules/ts/include/opencv2/ts/ocl_perf.hpp

@@ -0,0 +1,140 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_TS_OCL_PERF_HPP
#define OPENCV_TS_OCL_PERF_HPP
#include "opencv2/ts.hpp"
#include "ocl_test.hpp"
#include "ts_perf.hpp"
namespace cvtest {
namespace ocl {
using namespace perf;
#define OCL_PERF_STRATEGY PERF_STRATEGY_SIMPLE
#define OCL_PERF_TEST(fixture, name) SIMPLE_PERF_TEST(fixture, name)
#define OCL_PERF_TEST_P(fixture, name, params) SIMPLE_PERF_TEST_P(fixture, name, params)
#define SIMPLE_PERF_TEST(fixture, name) \
class OCL##_##fixture##_##name : \
public ::perf::TestBase \
{ \
public: \
OCL##_##fixture##_##name() { } \
protected: \
virtual void PerfTestBody(); \
}; \
TEST_F(OCL##_##fixture##_##name, name) { CV_TRACE_REGION("PERF_TEST: " #fixture "_" #name); declare.strategy(OCL_PERF_STRATEGY); RunPerfTestBody(); } \
void OCL##_##fixture##_##name::PerfTestBody()
#define SIMPLE_PERF_TEST_P(fixture, name, params) \
class OCL##_##fixture##_##name : \
public fixture \
{ \
public: \
OCL##_##fixture##_##name() { } \
protected: \
virtual void PerfTestBody(); \
}; \
TEST_P(OCL##_##fixture##_##name, name) { CV_TRACE_REGION("PERF_TEST_P: " #fixture "_" #name); declare.strategy(OCL_PERF_STRATEGY); RunPerfTestBody(); } \
INSTANTIATE_TEST_CASE_P(/*none*/, OCL##_##fixture##_##name, params); \
void OCL##_##fixture##_##name::PerfTestBody()
#define OCL_SIZE_1 szVGA
#define OCL_SIZE_2 sz720p
#define OCL_SIZE_3 sz1080p
#define OCL_SIZE_4 sz2160p
#define OCL_TEST_SIZES ::testing::Values(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3, OCL_SIZE_4)
#define OCL_TEST_TYPES ::testing::Values(CV_8UC1, CV_32FC1, CV_8UC4, CV_32FC4)
#define OCL_TEST_TYPES_14 OCL_TEST_TYPES
#define OCL_TEST_TYPES_134 ::testing::Values(CV_8UC1, CV_32FC1, CV_8UC3, CV_32FC3, CV_8UC4, CV_32FC4)
#define OCL_PERF_ENUM ::testing::Values
//! deprecated
#define OCL_TEST_CYCLE() \
for (cvtest::ocl::perf::safeFinish(); next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer())
//! deprecated
#define OCL_TEST_CYCLE_N(n) \
for (declare.iterations(n), cvtest::ocl::perf::safeFinish(); next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer())
//! deprecated
#define OCL_TEST_CYCLE_MULTIRUN(runsNum) \
for (declare.runs(runsNum), cvtest::ocl::perf::safeFinish(); next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer()) \
for (int r = 0; r < runsNum; cvtest::ocl::perf::safeFinish(), ++r)
#undef PERF_SAMPLE_BEGIN
#undef PERF_SAMPLE_END
#define PERF_SAMPLE_BEGIN() \
cvtest::ocl::perf::safeFinish(); \
for(; next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer()) \
{ \
CV_TRACE_REGION("iteration");
#define PERF_SAMPLE_END() \
}
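// Usage sketch (illustrative; the measured operation is hypothetical, and
// Size_MatType, declare, and SANITY_CHECK come from ts_perf.hpp):
//
//   typedef Size_MatType BlurFixture;
//
//   OCL_PERF_TEST_P(BlurFixture, Blur,
//                   ::testing::Combine(OCL_TEST_SIZES, OCL_TEST_TYPES))
//   {
//       const Size srcSize = get<0>(GetParam());
//       const int type = get<1>(GetParam());
//       UMat src(srcSize, type), dst(srcSize, type);
//       declare.in(src, WARMUP_RNG).out(dst);
//       PERF_SAMPLE_BEGIN()
//           cv::blur(src, dst, Size(3, 3));
//       PERF_SAMPLE_END()
//       SANITY_CHECK(dst);
//   }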
namespace perf {
// Check for current device limitation
void checkDeviceMaxMemoryAllocSize(const Size& size, int type, int factor = 1);
// Initialize Mat with random numbers. The range depends on the data type.
// TODO Parameter type is actually OutputArray
void randu(InputOutputArray dst);
inline void safeFinish()
{
if (cv::ocl::useOpenCL())
cv::ocl::finish();
}
} // namespace perf
using namespace perf;
} // namespace cvtest::ocl
} // namespace cvtest
#endif // OPENCV_TS_OCL_PERF_HPP

modules/ts/include/opencv2/ts/ocl_test.hpp

@@ -0,0 +1,392 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_TS_OCL_TEST_HPP
#define OPENCV_TS_OCL_TEST_HPP
#include "opencv2/ts.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/types_c.h"
#include "opencv2/core/ocl.hpp"
namespace cvtest {
namespace ocl {
using namespace cv;
using namespace testing;
inline std::vector<UMat> ToUMat(const std::vector<Mat>& src)
{
std::vector<UMat> dst;
dst.resize(src.size());
for (size_t i = 0; i < src.size(); ++i)
{
src[i].copyTo(dst[i]);
}
return dst;
}
inline UMat ToUMat(const Mat& src)
{
UMat dst;
src.copyTo(dst);
return dst;
}
inline UMat ToUMat(InputArray src)
{
UMat dst;
src.getMat().copyTo(dst);
return dst;
}
extern int test_loop_times;
#define MAX_VALUE 357
#define EXPECT_MAT_NORM(mat, eps) \
do \
{ \
EXPECT_LE(TestUtils::checkNorm1(mat), eps); \
} while ((void)0, 0)
#undef EXPECT_MAT_NEAR
#define EXPECT_MAT_NEAR(mat1, mat2, eps) \
do \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
EXPECT_LE(TestUtils::checkNorm2(mat1, mat2), eps) \
<< "Size: " << mat1.size() << std::endl; \
} while ((void)0, 0)
#define EXPECT_MAT_NEAR_RELATIVE(mat1, mat2, eps) \
do \
{ \
ASSERT_EQ((mat1).type(), (mat2).type()); \
ASSERT_EQ((mat1).size(), (mat2).size()); \
EXPECT_LE(TestUtils::checkNormRelative((mat1), (mat2)), eps) \
<< "Size: " << (mat1).size() << std::endl; \
} while ((void)0, 0)
#define EXPECT_MAT_N_DIFF(mat1, mat2, num) \
do \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
Mat diff; \
absdiff(mat1, mat2, diff); \
EXPECT_LE(countNonZero(diff.reshape(1)), num) \
<< "Size: " << mat1.size() << std::endl; \
} while ((void)0, 0)
#define OCL_EXPECT_MAT_N_DIFF(name, eps) \
do \
{ \
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
Mat diff, binary, binary_8; \
absdiff(name ## _roi, u ## name ## _roi, diff); \
Mat mask(diff.size(), CV_8UC(dst.channels()), cv::Scalar::all(255)); \
if (mask.cols > 2 && mask.rows > 2) \
mask(cv::Rect(1, 1, mask.cols - 2, mask.rows - 2)).setTo(0); \
cv::threshold(diff, binary, (double)eps, 255, cv::THRESH_BINARY); \
EXPECT_LE(countNonZero(binary.reshape(1)), (int)(binary.cols*binary.rows*5/1000)) \
<< "Size: " << name ## _roi.size() << std::endl; \
binary.convertTo(binary_8, mask.type()); \
binary_8 = binary_8 & mask; \
EXPECT_LE(countNonZero(binary_8.reshape(1)), (int)((binary_8.cols+binary_8.rows)/100)) \
<< "Size: " << name ## _roi.size() << std::endl; \
} while ((void)0, 0)
#define OCL_EXPECT_MATS_NEAR(name, eps) \
do \
{ \
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
EXPECT_LE(TestUtils::checkNorm2(name ## _roi, u ## name ## _roi), eps) \
<< "Size: " << name ## _roi.size() << std::endl; \
Point _offset; \
Size _wholeSize; \
u ## name ## _roi.locateROI(_wholeSize, _offset); \
Mat _mask(name.size(), CV_8UC1, Scalar::all(255)); \
_mask(Rect(_offset, name ## _roi.size())).setTo(Scalar::all(0)); \
ASSERT_EQ(name.type(), u ## name.type()); \
ASSERT_EQ(name.size(), u ## name.size()); \
EXPECT_LE(TestUtils::checkNorm2(name, u ## name, _mask), eps) \
<< "Size: " << name ## _roi.size() << std::endl; \
} while ((void)0, 0)
#define OCL_EXPECT_MATS_NEAR_RELATIVE(name, eps) \
do \
{ \
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
EXPECT_LE(TestUtils::checkNormRelative(name ## _roi, u ## name ## _roi), eps) \
<< "Size: " << name ## _roi.size() << std::endl; \
Point _offset; \
Size _wholeSize; \
name ## _roi.locateROI(_wholeSize, _offset); \
Mat _mask(name.size(), CV_8UC1, Scalar::all(255)); \
_mask(Rect(_offset, name ## _roi.size())).setTo(Scalar::all(0)); \
ASSERT_EQ(name.type(), u ## name.type()); \
ASSERT_EQ(name.size(), u ## name.size()); \
EXPECT_LE(TestUtils::checkNormRelative(name, u ## name, _mask), eps) \
<< "Size: " << name ## _roi.size() << std::endl; \
} while ((void)0, 0)
// variant for sparse matrices
#define OCL_EXPECT_MATS_NEAR_RELATIVE_SPARSE(name, eps) \
do \
{ \
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
EXPECT_LE(TestUtils::checkNormRelativeSparse(name ## _roi, u ## name ## _roi), eps) \
<< "Size: " << name ## _roi.size() << std::endl; \
Point _offset; \
Size _wholeSize; \
name ## _roi.locateROI(_wholeSize, _offset); \
Mat _mask(name.size(), CV_8UC1, Scalar::all(255)); \
_mask(Rect(_offset, name ## _roi.size())).setTo(Scalar::all(0)); \
ASSERT_EQ(name.type(), u ## name.type()); \
ASSERT_EQ(name.size(), u ## name.size()); \
EXPECT_LE(TestUtils::checkNormRelativeSparse(name, u ## name, _mask), eps) \
<< "Size: " << name ## _roi.size() << std::endl; \
} while ((void)0, 0)
#undef EXPECT_MAT_SIMILAR
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
do \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
EXPECT_LE(checkSimilarity(mat1, mat2), eps) \
<< "Size: " << mat1.size() << std::endl; \
} while ((void)0, 0)
using perf::MatDepth;
using perf::MatType;
#define OCL_RNG_SEED 123456
struct TestUtils
{
cv::RNG rng;
TestUtils()
{
rng = cv::RNG(OCL_RNG_SEED);
}
int randomInt(int minVal, int maxVal)
{
return rng.uniform(minVal, maxVal);
}
double randomDouble(double minVal, double maxVal)
{
return rng.uniform(minVal, maxVal);
}
double randomDoubleLog(double minVal, double maxVal)
{
double logMin = log((double)minVal + 1);
double logMax = log((double)maxVal + 1);
double pow = rng.uniform(logMin, logMax);
double v = exp(pow) - 1;
CV_Assert(v >= minVal && (v < maxVal || (v == minVal && v == maxVal)));
return v;
}
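// Example: randomDoubleLog(1, 1000) samples log-uniformly, so draws from 1..10
// are about as likely as draws from 100..1000; randomSize() below relies on this
// to exercise both tiny and large inputs instead of clustering near the maximum.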
Size randomSize(int minVal, int maxVal)
{
#if 1
return cv::Size((int)randomDoubleLog(minVal, maxVal), (int)randomDoubleLog(minVal, maxVal));
#else
return cv::Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
#endif
}
Size randomSize(int minValX, int maxValX, int minValY, int maxValY)
{
#if 1
return cv::Size((int)randomDoubleLog(minValX, maxValX), (int)randomDoubleLog(minValY, maxValY));
#else
return cv::Size(randomInt(minValX, maxValX), randomInt(minValY, maxValY));
#endif
}
Scalar randomScalar(double minVal, double maxVal)
{
return Scalar(randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal));
}
Mat randomMat(Size size, int type, double minVal, double maxVal, bool useRoi = false)
{
RNG dataRng(rng.next());
return cvtest::randomMat(dataRng, size, type, minVal, maxVal, useRoi);
}
struct Border
{
int top, bot, lef, rig;
};
Border randomBorder(int minValue = 0, int maxValue = MAX_VALUE)
{
Border border = {
(int)randomDoubleLog(minValue, maxValue),
(int)randomDoubleLog(minValue, maxValue),
(int)randomDoubleLog(minValue, maxValue),
(int)randomDoubleLog(minValue, maxValue)
};
return border;
}
void randomSubMat(Mat& whole, Mat& subMat, const Size& roiSize, const Border& border, int type, double minVal, double maxVal)
{
Size wholeSize = Size(roiSize.width + border.lef + border.rig, roiSize.height + border.top + border.bot);
whole = randomMat(wholeSize, type, minVal, maxVal, false);
subMat = whole(Rect(border.lef, border.top, roiSize.width, roiSize.height));
}
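// Sketch (illustrative values): embed a 64x48 ROI in a randomly sized border.
//   Border b = randomBorder();
//   Mat whole, roi;
//   randomSubMat(whole, roi, Size(64, 48), b, CV_8UC3, 0, 255);
//   // 'roi' aliases 'whole'; the tested code must leave the border pixels untouched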
// If the two rect vectors differ in size, returns that size difference.
// Otherwise returns (total pixel-wise difference over the areas covered by the two rect sets) / (total pixels covered by the first set).
// The smaller the value, the better the match.
static double checkRectSimilarity(const cv::Size & sz, std::vector<cv::Rect>& ob1, std::vector<cv::Rect>& ob2);
//! Reads an image from the testdata folder.
static cv::Mat readImage(const String &fileName, int flags = cv::IMREAD_COLOR);
static cv::Mat readImageType(const String &fname, int type);
static double checkNorm1(InputArray m, InputArray mask = noArray());
static double checkNorm2(InputArray m1, InputArray m2, InputArray mask = noArray());
static double checkSimilarity(InputArray m1, InputArray m2);
static void showDiff(InputArray _src, InputArray _gold, InputArray _actual, double eps, bool alwaysShow);
static inline double checkNormRelative(InputArray m1, InputArray m2, InputArray mask = noArray())
{
return cvtest::norm(m1.getMat(), m2.getMat(), cv::NORM_INF, mask) /
std::max((double)std::numeric_limits<float>::epsilon(),
(double)std::max(cvtest::norm(m1.getMat(), cv::NORM_INF), cvtest::norm(m2.getMat(), cv::NORM_INF)));
}
static inline double checkNormRelativeSparse(InputArray m1, InputArray m2, InputArray mask = noArray())
{
double norm_inf = cvtest::norm(m1.getMat(), m2.getMat(), cv::NORM_INF, mask);
double norm_rel = norm_inf /
std::max((double)std::numeric_limits<float>::epsilon(),
(double)std::max(cvtest::norm(m1.getMat(), cv::NORM_INF), cvtest::norm(m2.getMat(), cv::NORM_INF)));
return std::min(norm_inf, norm_rel);
}
};
#define TEST_DECLARE_INPUT_PARAMETER(name) Mat name, name ## _roi; UMat u ## name, u ## name ## _roi
#define TEST_DECLARE_OUTPUT_PARAMETER(name) TEST_DECLARE_INPUT_PARAMETER(name)
#define UMAT_UPLOAD_INPUT_PARAMETER(name) \
do \
{ \
name.copyTo(u ## name); \
Size _wholeSize; Point ofs; name ## _roi.locateROI(_wholeSize, ofs); \
u ## name ## _roi = u ## name(Rect(ofs.x, ofs.y, name ## _roi.size().width, name ## _roi.size().height)); \
} while ((void)0, 0)
#define UMAT_UPLOAD_OUTPUT_PARAMETER(name) UMAT_UPLOAD_INPUT_PARAMETER(name)
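// Typical accuracy-test flow built from these macros (a sketch, not a complete
// fixture; 'src'/'dst' and the blur call are placeholders):
//   TEST_DECLARE_INPUT_PARAMETER(src);
//   TEST_DECLARE_OUTPUT_PARAMETER(dst);
//   ... fill src/src_roi and dst/dst_roi with random data ...
//   UMAT_UPLOAD_INPUT_PARAMETER(src);   // copies src into usrc and re-derives the ROI
//   UMAT_UPLOAD_OUTPUT_PARAMETER(dst);
//   OCL_OFF(cv::blur(src_roi, dst_roi, Size(3, 3)));   // CPU reference path
//   OCL_ON(cv::blur(usrc_roi, udst_roi, Size(3, 3)));  // OpenCL path under test
//   OCL_EXPECT_MATS_NEAR(dst, 0);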
template <typename T>
struct TSTestWithParam : public TestUtils, public ::testing::TestWithParam<T>
{
};
#undef PARAM_TEST_CASE
#define PARAM_TEST_CASE(name, ...) struct name : public ::cvtest::ocl::TSTestWithParam< testing::tuple< __VA_ARGS__ > >
#ifndef IMPLEMENT_PARAM_CLASS
#define IMPLEMENT_PARAM_CLASS(name, type) \
class name \
{ \
public: \
name ( type arg = type ()) : val_(arg) {} \
operator type () const {return val_;} \
private: \
type val_; \
}; \
inline void PrintTo( name param, std::ostream* os) \
{ \
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
}
IMPLEMENT_PARAM_CLASS(Channels, int)
#endif // IMPLEMENT_PARAM_CLASS
#define OCL_TEST_P TEST_P
#define OCL_TEST_F(name, ...) typedef name OCL_##name; TEST_F(OCL_##name, __VA_ARGS__)
#define OCL_TEST(name, ...) TEST(OCL_##name, __VA_ARGS__)
#define OCL_OFF(...) cv::ocl::setUseOpenCL(false); __VA_ARGS__ ;
#define OCL_ON(...) cv::ocl::setUseOpenCL(true); __VA_ARGS__ ;
#define OCL_ALL_DEPTHS Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F)
#define OCL_ALL_DEPTHS_16F Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F, CV_16F)
#define OCL_ALL_CHANNELS Values(1, 2, 3, 4)
CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, INTER_LINEAR_EXACT)
CV_ENUM(ThreshOp, THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV)
CV_ENUM(BorderType, BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, BORDER_REFLECT_101)
#define OCL_INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
INSTANTIATE_TEST_CASE_P(OCL_ ## prefix, test_case_name, generator)
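// Example instantiation (the test-case name is hypothetical):
//   OCL_INSTANTIATE_TEST_CASE_P(Arithm, AddWeighted,
//       ::testing::Combine(OCL_ALL_DEPTHS, OCL_ALL_CHANNELS, ::testing::Bool()));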
} } // namespace cvtest::ocl
namespace opencv_test {
namespace ocl {
using namespace cvtest::ocl;
}} // namespace
#endif // OPENCV_TS_OCL_TEST_HPP

@@ -0,0 +1,196 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2014, Intel, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef OPENCV_TS_EXT_HPP
#define OPENCV_TS_EXT_HPP
namespace cvtest {
void checkIppStatus();
extern bool skipUnstableTests;
extern bool runBigDataTests;
extern int testThreads;
extern int debugLevel; //!< 0 - no debug, 1 - basic test debug information, >1 - extra debug information
void testSetUp();
void testTearDown();
bool checkBigDataTests();
}
// check for required "opencv_test" namespace
#if !defined(CV_TEST_SKIP_NAMESPACE_CHECK) && defined(__OPENCV_BUILD)
#define CV__TEST_NAMESPACE_CHECK required_opencv_test_namespace = true;
#else
#define CV__TEST_NAMESPACE_CHECK // nothing
#endif
#define CV__TEST_INIT \
CV__TEST_NAMESPACE_CHECK \
::cvtest::testSetUp();
#define CV__TEST_CLEANUP ::cvtest::testTearDown();
#define CV__TEST_BODY_IMPL(name) \
{ \
CV__TRACE_APP_FUNCTION_NAME(name); \
try { \
CV__TEST_INIT \
Body(); \
CV__TEST_CLEANUP \
} \
catch (const cvtest::details::SkipTestExceptionBase& e) \
{ \
printf("[ SKIP ] %s\n", e.what()); \
} \
} \
\
#undef TEST
#define TEST_(test_case_name, test_name, parent_class, bodyMethodName, BODY_IMPL) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
public:\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
private:\
virtual void TestBody() CV_OVERRIDE;\
virtual void bodyMethodName();\
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
};\
\
::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
::test_info_ =\
::testing::internal::MakeAndRegisterTestInfo(\
#test_case_name, #test_name, NULL, NULL, \
::testing::internal::CodeLocation(__FILE__, __LINE__), \
(::testing::internal::GetTestTypeId()), \
parent_class::SetUpTestCase, \
parent_class::TearDownTestCase, \
new ::testing::internal::TestFactoryImpl<\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() BODY_IMPL( #test_case_name "_" #test_name ) \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::bodyMethodName()
#define TEST(test_case_name, test_name) TEST_(test_case_name, test_name, ::testing::Test, Body, CV__TEST_BODY_IMPL)
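// The redefined TEST() keeps the stock googletest syntax but wraps the body with
// OpenCV's per-test setup/teardown and skip handling, e.g. (hypothetical names):
//   TEST(CoreSuite, ExampleName)
//   {
//       // may throw a SkipTestExceptionBase-derived type to be reported as [ SKIP ]
//   }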
#define CV__TEST_BIGDATA_BODY_IMPL(name) \
{ \
if (!cvtest::checkBigDataTests()) \
{ \
return; \
} \
CV__TRACE_APP_FUNCTION_NAME(name); \
try { \
CV__TEST_INIT \
Body(); \
CV__TEST_CLEANUP \
} \
catch (const cvtest::details::SkipTestExceptionBase& e) \
{ \
printf("[ SKIP ] %s\n", e.what()); \
} \
} \
\
// Special type of tests which require / use or validate processing of huge amount of data (>= 2Gb)
#if defined(_M_X64) || defined(_M_ARM64) || defined(__x86_64__) || defined(__aarch64__)
#define BIGDATA_TEST(test_case_name, test_name) TEST_(BigData_ ## test_case_name, test_name, ::testing::Test, Body, CV__TEST_BIGDATA_BODY_IMPL)
#else
#define BIGDATA_TEST(test_case_name, test_name) TEST_(BigData_ ## test_case_name, DISABLED_ ## test_name, ::testing::Test, Body, CV__TEST_BIGDATA_BODY_IMPL)
#endif
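// Example (hypothetical name): BIGDATA_TEST(Core_Mat, allocate_4gb) registers a
// regular test on 64-bit targets and a DISABLED_ one on 32-bit targets; at runtime
// the body additionally returns early unless checkBigDataTests() allows it.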
#undef TEST_F
#define TEST_F(test_fixture, test_name)\
class GTEST_TEST_CLASS_NAME_(test_fixture, test_name) : public test_fixture {\
public:\
GTEST_TEST_CLASS_NAME_(test_fixture, test_name)() {}\
private:\
virtual void TestBody() CV_OVERRIDE;\
virtual void Body(); \
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_fixture, test_name));\
};\
\
::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_fixture, test_name)\
::test_info_ =\
::testing::internal::MakeAndRegisterTestInfo(\
#test_fixture, #test_name, NULL, NULL, \
::testing::internal::CodeLocation(__FILE__, __LINE__), \
(::testing::internal::GetTypeId<test_fixture>()), \
test_fixture::SetUpTestCase, \
test_fixture::TearDownTestCase, \
new ::testing::internal::TestFactoryImpl<\
GTEST_TEST_CLASS_NAME_(test_fixture, test_name)>);\
void GTEST_TEST_CLASS_NAME_(test_fixture, test_name)::TestBody() CV__TEST_BODY_IMPL( #test_fixture "_" #test_name ) \
void GTEST_TEST_CLASS_NAME_(test_fixture, test_name)::Body()
// Don't use directly
#define CV__TEST_P(test_case_name, test_name, bodyMethodName, BODY_IMPL/*(name_str)*/) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public test_case_name { \
public: \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
private: \
virtual void bodyMethodName(); \
virtual void TestBody() CV_OVERRIDE; \
static int AddToRegistry() { \
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
GetTestCasePatternHolder<test_case_name>(\
#test_case_name, \
::testing::internal::CodeLocation(\
__FILE__, __LINE__))->AddTestPattern(\
#test_case_name, \
#test_name, \
new ::testing::internal::TestMetaFactory< \
GTEST_TEST_CLASS_NAME_(\
test_case_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_case_name, \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() BODY_IMPL( #test_case_name "_" #test_name ) \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::bodyMethodName()
#undef TEST_P
#define TEST_P(test_case_name, test_name) CV__TEST_P(test_case_name, test_name, Body, CV__TEST_BODY_IMPL)
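// Usage sketch for the macro defined below (statement and message are placeholders):
// asserts that a statement throws cv::Exception whose message contains the substring.
//   CV_TEST_EXPECT_EXCEPTION_MESSAGE(myFunctionThatThrows(), "expected substring");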
#define CV_TEST_EXPECT_EXCEPTION_MESSAGE(statement, msg) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (::testing::internal::AlwaysTrue()) { \
const char* msg_ = msg; \
bool hasException = false; \
try { \
GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
} \
catch (const cv::Exception& e) { \
if (NULL == strstr(e.what(), msg_)) \
ADD_FAILURE() << "Unexpected cv::Exception is raised: " << #statement << "\n Expected message substring: '" << msg_ << "'. Actual message:\n" << e.what(); \
hasException = true; \
} \
catch (const std::exception& e) { \
ADD_FAILURE() << "Unexpected std::exception is raised: " << #statement << "\n" << e.what(); \
hasException = true; \
} \
catch (...) { \
ADD_FAILURE() << "Unexpected C++ exception is raised: " << #statement; \
hasException = true; \
} \
if (!hasException) { \
goto GTEST_CONCAT_TOKEN_(gtest_label_test_, __LINE__); \
} \
} else \
GTEST_CONCAT_TOKEN_(gtest_label_test_, __LINE__): \
ADD_FAILURE() << "Failed: Expected: " #statement " throws an '" << msg << "' exception.\n" \
" Actual: it doesn't."
#endif // OPENCV_TS_EXT_HPP

@@ -0,0 +1,731 @@
#ifndef OPENCV_TS_PERF_HPP
#define OPENCV_TS_PERF_HPP
#include "opencv2/ts.hpp"
#include "ts_ext.hpp"
#include <functional>
#if !(defined(LOGD) || defined(LOGI) || defined(LOGW) || defined(LOGE))
# if defined(__ANDROID__) && defined(USE_ANDROID_LOGGING)
# include <android/log.h>
# define PERF_TESTS_LOG_TAG "OpenCV_perf"
# define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, PERF_TESTS_LOG_TAG, __VA_ARGS__))
# define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, PERF_TESTS_LOG_TAG, __VA_ARGS__))
# define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, PERF_TESTS_LOG_TAG, __VA_ARGS__))
# define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, PERF_TESTS_LOG_TAG, __VA_ARGS__))
# else
# define LOGD(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
# define LOGI(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
# define LOGW(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
# define LOGE(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
# endif
#endif
// declare major namespaces to avoid "unknown namespace" errors
namespace cv { namespace cuda {} namespace ocl {} }
namespace cvtest { }
namespace perf
{
// Tuple stuff from Google Tests
using testing::get;
using testing::make_tuple;
using testing::tuple;
using testing::tuple_size;
using testing::tuple_element;
class TestBase;
/*****************************************************************************************\
* Predefined typical frame sizes and typical test parameters *
\*****************************************************************************************/
const static cv::Size szQVGA = cv::Size(320, 240);
const static cv::Size szVGA = cv::Size(640, 480);
const static cv::Size szSVGA = cv::Size(800, 600);
const static cv::Size szXGA = cv::Size(1024, 768);
const static cv::Size szSXGA = cv::Size(1280, 1024);
const static cv::Size szWQHD = cv::Size(2560, 1440);
const static cv::Size sznHD = cv::Size(640, 360);
const static cv::Size szqHD = cv::Size(960, 540);
const static cv::Size sz240p = szQVGA;
const static cv::Size sz720p = cv::Size(1280, 720);
const static cv::Size sz1080p = cv::Size(1920, 1080);
const static cv::Size sz1440p = szWQHD;
const static cv::Size sz2160p = cv::Size(3840, 2160);//UHDTV1 4K
const static cv::Size sz4320p = cv::Size(7680, 4320);//UHDTV2 8K
const static cv::Size sz3MP = cv::Size(2048, 1536);
const static cv::Size sz5MP = cv::Size(2592, 1944);
const static cv::Size sz2K = cv::Size(2048, 2048);
const static cv::Size szODD = cv::Size(127, 61);
const static cv::Size szSmall24 = cv::Size(24, 24);
const static cv::Size szSmall32 = cv::Size(32, 32);
const static cv::Size szSmall64 = cv::Size(64, 64);
const static cv::Size szSmall128 = cv::Size(128, 128);
#define SZ_ALL_VGA ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA)
#define SZ_ALL_GA ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA, ::perf::szXGA, ::perf::szSXGA)
#define SZ_ALL_HD ::testing::Values(::perf::sznHD, ::perf::szqHD, ::perf::sz720p, ::perf::sz1080p)
#define SZ_ALL_SMALL ::testing::Values(::perf::szSmall24, ::perf::szSmall32, ::perf::szSmall64, ::perf::szSmall128)
#define SZ_ALL ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA, ::perf::szXGA, ::perf::szSXGA, ::perf::sznHD, ::perf::szqHD, ::perf::sz720p, ::perf::sz1080p)
#define SZ_TYPICAL ::testing::Values(::perf::szVGA, ::perf::szqHD, ::perf::sz720p, ::perf::szODD)
#define TYPICAL_MAT_SIZES ::perf::szVGA, ::perf::sz720p, ::perf::sz1080p, ::perf::szODD
#define TYPICAL_MAT_TYPES CV_8UC1, CV_8UC4, CV_32FC1
#define TYPICAL_MATS testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( TYPICAL_MAT_TYPES ) )
#define TYPICAL_MATS_C1 testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( CV_8UC1, CV_32FC1 ) )
#define TYPICAL_MATS_C4 testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( CV_8UC4 ) )
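// Example (a sketch; the fixture/name pair is illustrative): run one perf test
// over the whole typical size/type grid.
//   PERF_TEST_P(Size_MatType, myOperation, TYPICAL_MATS)
//   { ... }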
/*****************************************************************************************\
* MatType - printable wrapper over integer 'type' of Mat *
\*****************************************************************************************/
class MatType
{
public:
MatType(int val=0) : _type(val) {}
operator int() const {return _type;}
private:
int _type;
};
/*****************************************************************************************\
* CV_ENUM and CV_FLAGS - macro to create printable wrappers for defines and enums *
\*****************************************************************************************/
#define CV_ENUM(class_name, ...) \
namespace { \
using namespace cv;using namespace cv::cuda; using namespace cv::ocl; \
struct class_name { \
class_name(int val = 0) : val_(val) {} \
operator int() const { return val_; } \
void PrintTo(std::ostream* os) const { \
const int vals[] = { __VA_ARGS__ }; \
const char* svals = #__VA_ARGS__; \
for(int i = 0, pos = 0; i < (int)(sizeof(vals)/sizeof(int)); ++i) { \
while(isspace(svals[pos]) || svals[pos] == ',') ++pos; \
int start = pos; \
while(!(isspace(svals[pos]) || svals[pos] == ',' || svals[pos] == 0)) \
++pos; \
if (val_ == vals[i]) { \
*os << std::string(svals + start, svals + pos); \
return; \
} \
} \
*os << "UNKNOWN"; \
} \
static ::testing::internal::ParamGenerator<class_name> all() { \
const class_name vals[] = { __VA_ARGS__ }; \
return ::testing::ValuesIn(vals); \
} \
private: int val_; \
}; \
static inline void PrintTo(const class_name& t, std::ostream* os) { t.PrintTo(os); } }
#define CV_FLAGS(class_name, ...) \
namespace { \
struct class_name { \
class_name(int val = 0) : val_(val) {} \
operator int() const { return val_; } \
void PrintTo(std::ostream* os) const { \
using namespace cv;using namespace cv::cuda; using namespace cv::ocl; \
const int vals[] = { __VA_ARGS__ }; \
const char* svals = #__VA_ARGS__; \
int value = val_; \
bool first = true; \
for(int i = 0, pos = 0; i < (int)(sizeof(vals)/sizeof(int)); ++i) { \
while(isspace(svals[pos]) || svals[pos] == ',') ++pos; \
int start = pos; \
while(!(isspace(svals[pos]) || svals[pos] == ',' || svals[pos] == 0)) \
++pos; \
if ((value & vals[i]) == vals[i]) { \
value &= ~vals[i]; \
if (first) first = false; else *os << "|"; \
*os << std::string(svals + start, svals + pos); \
if (!value) return; \
} \
} \
if (first) *os << "UNKNOWN"; \
} \
private: int val_; \
}; \
static inline void PrintTo(const class_name& t, std::ostream* os) { t.PrintTo(os); } }
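// Example (hypothetical wrapper name): CV_FLAGS(NormFlags, NORM_INF, NORM_L1, NORM_L2, NORM_RELATIVE)
// prints a combined value such as NORM_L2|NORM_RELATIVE instead of an opaque integer.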
CV_ENUM(MatDepth, CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F, CV_16F)
/*****************************************************************************************\
* Regression control utility for performance testing *
\*****************************************************************************************/
enum ERROR_TYPE
{
ERROR_ABSOLUTE = 0,
ERROR_RELATIVE = 1
};
class Regression
{
public:
static Regression& add(TestBase* test, const std::string& name, cv::InputArray array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
static Regression& addMoments(TestBase* test, const std::string& name, const cv::Moments & array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
static Regression& addKeypoints(TestBase* test, const std::string& name, const std::vector<cv::KeyPoint>& array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
static Regression& addMatches(TestBase* test, const std::string& name, const std::vector<cv::DMatch>& array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
static void Init(const std::string& testSuitName, const std::string& ext = ".xml");
Regression& operator() (const std::string& name, cv::InputArray array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
private:
static Regression& instance();
Regression();
~Regression();
Regression(const Regression&);
Regression& operator=(const Regression&);
cv::RNG regRNG; // dedicated random number generator, so that collection and verification runs behave identically
std::string storageInPath;
std::string storageOutPath;
cv::FileStorage storageIn;
cv::FileStorage storageOut;
cv::FileNode rootIn;
std::string currentTestNodeName;
std::string suiteName;
cv::FileStorage& write();
static std::string getCurrentTestNodeName();
static bool isVector(cv::InputArray a);
static double getElem(cv::Mat& m, int x, int y, int cn = 0);
void init(const std::string& testSuitName, const std::string& ext);
void write(cv::InputArray array);
void write(cv::Mat m);
void verify(cv::FileNode node, cv::InputArray array, double eps, ERROR_TYPE err);
void verify(cv::FileNode node, cv::Mat actual, double eps, std::string argname, ERROR_TYPE err);
};
#define SANITY_CHECK(array, ...) ::perf::Regression::add(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_MOMENTS(array, ...) ::perf::Regression::addMoments(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_KEYPOINTS(array, ...) ::perf::Regression::addKeypoints(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_MATCHES(array, ...) ::perf::Regression::addMatches(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_NOTHING() this->setVerified()
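// Regression-check sketch: on the first (collection) run the array is stored in the
// suite's XML; later runs compare against it ('dst' is whatever the test computed).
//   SANITY_CHECK(dst, 1e-5);
//   SANITY_CHECK_NOTHING();  // when only timing is validated, not the output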
class GpuPerf
{
public:
static bool targetDevice();
};
#define PERF_RUN_CUDA() ::perf::GpuPerf::targetDevice()
/*****************************************************************************************\
* Container for performance metrics *
\*****************************************************************************************/
typedef struct performance_metrics
{
size_t bytesIn;
size_t bytesOut;
unsigned int samples;
unsigned int outliers;
double gmean;
double gstddev; // stddev for log(time)
double mean;
double stddev;
double median;
double min;
double frequency;
int terminationReason;
enum
{
TERM_ITERATIONS = 0,
TERM_TIME = 1,
TERM_INTERRUPT = 2,
TERM_EXCEPTION = 3,
TERM_SKIP_TEST = 4, // the test hit some limitation and should be skipped
TERM_UNKNOWN = -1
};
performance_metrics();
void clear();
} performance_metrics;
/*****************************************************************************************\
* Strategy for performance measuring *
\*****************************************************************************************/
enum PERF_STRATEGY
{
PERF_STRATEGY_DEFAULT = -1,
PERF_STRATEGY_BASE = 0,
PERF_STRATEGY_SIMPLE = 1
};
/*****************************************************************************************\
* Base fixture for performance tests *
\*****************************************************************************************/
#ifdef CV_COLLECT_IMPL_DATA
// Implementation collection processing class.
// Accumulates and shapes implementation data.
typedef struct ImplData
{
bool ipp;
bool icv;
bool ipp_mt;
bool ocl;
bool plain;
std::vector<int> implCode;
std::vector<cv::String> funName;
ImplData()
{
Reset();
}
void Reset()
{
cv::setImpl(0);
ipp = icv = ocl = ipp_mt = false;
implCode.clear();
funName.clear();
}
void GetImpl()
{
flagsToVars(cv::getImpl(implCode, funName));
}
std::vector<cv::String> GetCallsForImpl(int impl)
{
std::vector<cv::String> out;
for(int i = 0; i < (int)implCode.size(); i++)
{
if(impl == implCode[i])
out.push_back(funName[i]);
}
return out;
}
// Remove duplicate entries
void ShapeUp()
{
std::vector<int> savedCode;
std::vector<cv::String> savedName;
for(int i = 0; i < (int)implCode.size(); i++)
{
bool match = false;
for(int j = 0; j < (int)savedCode.size(); j++)
{
if(implCode[i] == savedCode[j] && !funName[i].compare(savedName[j]))
{
match = true;
break;
}
}
if(!match)
{
savedCode.push_back(implCode[i]);
savedName.push_back(funName[i]);
}
}
implCode = savedCode;
funName = savedName;
}
// convert the flags register into more convenient variables
void flagsToVars(int flags)
{
#if defined(HAVE_IPP_ICV)
ipp = 0;
icv = ((flags&CV_IMPL_IPP) > 0);
#else
ipp = ((flags&CV_IMPL_IPP) > 0);
icv = 0;
#endif
ipp_mt = ((flags&CV_IMPL_MT) > 0);
ocl = ((flags&CV_IMPL_OCL) > 0);
plain = (flags == 0);
}
} ImplData;
#endif
#ifdef ENABLE_INSTRUMENTATION
class InstumentData
{
public:
static ::cv::String treeToString();
static void printTree();
};
#endif
class TestBase: public ::testing::Test
{
public:
TestBase();
static void Init(int argc, const char* const argv[]);
static void Init(const std::vector<std::string> & availableImpls,
int argc, const char* const argv[]);
static void RecordRunParameters();
static std::string getDataPath(const std::string& relativePath);
static std::string getSelectedImpl();
static enum PERF_STRATEGY getCurrentModulePerformanceStrategy();
static enum PERF_STRATEGY setModulePerformanceStrategy(enum PERF_STRATEGY strategy);
class PerfSkipTestException: public cvtest::SkipTestException
{
public:
int dummy; // workaround for MacOSX Xcode 7.3 bug (don't make class "empty")
PerfSkipTestException() : dummy(0) {}
};
protected:
virtual void PerfTestBody() = 0;
virtual void SetUp() CV_OVERRIDE;
virtual void TearDown() CV_OVERRIDE;
bool startTimer(); // bool is dummy for conditional loop
void stopTimer();
bool next();
PERF_STRATEGY getCurrentPerformanceStrategy() const;
enum WarmUpType
{
WARMUP_READ,
WARMUP_WRITE,
WARMUP_RNG,
WARMUP_NONE
};
void reportMetrics(bool toJUnitXML = false);
static void warmup(cv::InputOutputArray a, WarmUpType wtype = WARMUP_READ);
performance_metrics& calcMetrics();
void RunPerfTestBody();
#ifdef CV_COLLECT_IMPL_DATA
ImplData implConf;
#endif
#ifdef ENABLE_INSTRUMENTATION
InstumentData instrConf;
#endif
private:
typedef std::vector<std::pair<int, cv::Size> > SizeVector;
typedef std::vector<int64> TimeVector;
SizeVector inputData;
SizeVector outputData;
unsigned int getTotalInputSize() const;
unsigned int getTotalOutputSize() const;
enum PERF_STRATEGY testStrategy;
TimeVector times;
int64 lastTime;
int64 totalTime;
int64 timeLimit;
static int64 timeLimitDefault;
static unsigned int iterationsLimitDefault;
unsigned int minIters;
unsigned int nIters;
unsigned int currentIter;
unsigned int runsPerIteration;
unsigned int perfValidationStage;
performance_metrics metrics;
void validateMetrics();
static int64 _timeadjustment;
static int64 _calibrate();
static void warmup_impl(cv::Mat m, WarmUpType wtype);
static int getSizeInBytes(cv::InputArray a);
static cv::Size getSize(cv::InputArray a);
static void declareArray(SizeVector& sizes, cv::InputOutputArray a, WarmUpType wtype);
class _declareHelper
{
public:
_declareHelper& in(cv::InputOutputArray a1, WarmUpType wtype = WARMUP_READ);
_declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, WarmUpType wtype = WARMUP_READ);
_declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, WarmUpType wtype = WARMUP_READ);
_declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, WarmUpType wtype = WARMUP_READ);
_declareHelper& out(cv::InputOutputArray a1, WarmUpType wtype = WARMUP_WRITE);
_declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, WarmUpType wtype = WARMUP_WRITE);
_declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, WarmUpType wtype = WARMUP_WRITE);
_declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, WarmUpType wtype = WARMUP_WRITE);
_declareHelper& iterations(unsigned int n);
_declareHelper& time(double timeLimitSecs);
_declareHelper& tbb_threads(int n = -1);
_declareHelper& runs(unsigned int runsNumber);
_declareHelper& strategy(enum PERF_STRATEGY s);
private:
TestBase* test;
_declareHelper(TestBase* t);
_declareHelper(const _declareHelper&);
_declareHelper& operator=(const _declareHelper&);
friend class TestBase;
};
friend class _declareHelper;
bool verified;
public:
_declareHelper declare;
void setVerified() { this->verified = true; }
};
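// Declaration sketch inside a perf test body (names are illustrative): register the
// arrays so their sizes are recorded and warmed up, then bound the measurement.
//   declare.in(src, WARMUP_RNG).out(dst).time(1.0).iterations(100);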
template<typename T> class TestBaseWithParam: public TestBase, public ::testing::WithParamInterface<T> {};
typedef tuple<cv::Size, MatType> Size_MatType_t;
typedef TestBaseWithParam<Size_MatType_t> Size_MatType;
/*****************************************************************************************\
* Print functions for googletest *
\*****************************************************************************************/
void PrintTo(const MatType& t, std::ostream* os);
} //namespace perf
namespace cv
{
void PrintTo(const String& str, ::std::ostream* os);
void PrintTo(const Size& sz, ::std::ostream* os);
} //namespace cv
/*****************************************************************************************\
* Macro definitions for performance tests *
\*****************************************************************************************/
#define CV__PERF_TEST_BODY_IMPL(name) \
{ \
CV__TEST_NAMESPACE_CHECK \
CV__TRACE_APP_FUNCTION_NAME("PERF_TEST: " name); \
try { \
::cvtest::testSetUp(); \
RunPerfTestBody(); \
} \
catch (cvtest::details::SkipTestExceptionBase& e) \
{ \
printf("[ SKIP ] %s\n", e.what()); \
} \
::cvtest::testTearDown(); \
}
#define PERF_PROXY_NAMESPACE_NAME_(test_case_name, test_name) \
test_case_name##_##test_name##_perf_namespace_proxy
// Defines a performance test.
//
// The first parameter is the name of the test case, and the second
// parameter is the name of the test within the test case.
//
// The user should put his test code between braces after using this
// macro. Example:
//
// PERF_TEST(FooTest, InitializesCorrectly) {
// Foo foo;
// EXPECT_TRUE(foo.StatusIsOK());
// }
#define PERF_TEST(test_case_name, test_name)\
TEST_(test_case_name, test_name, ::perf::TestBase, PerfTestBody, CV__PERF_TEST_BODY_IMPL)
// Defines a performance test that uses a test fixture.
//
// The first parameter is the name of the test fixture class, which
// also doubles as the test case name. The second parameter is the
// name of the test within the test case.
//
// A test fixture class must be declared earlier. The user should put
// his test code between braces after using this macro. Example:
//
// class FooTest : public ::perf::TestBase {
// protected:
// virtual void SetUp() { TestBase::SetUp(); b_.AddElement(3); }
//
// Foo a_;
// Foo b_;
// };
//
// PERF_TEST_F(FooTest, InitializesCorrectly) {
// EXPECT_TRUE(a_.StatusIsOK());
// }
//
// PERF_TEST_F(FooTest, ReturnsElementCountCorrectly) {
// EXPECT_EQ(0, a_.size());
// EXPECT_EQ(1, b_.size());
// }
#define PERF_TEST_F(fixture, testname) \
namespace PERF_PROXY_NAMESPACE_NAME_(fixture, testname) {\
class TestBase {/*compile error for this class means that you are trying to use perf::TestBase as a fixture*/};\
class fixture : public ::fixture {\
public:\
fixture() {}\
protected:\
virtual void PerfTestBody();\
};\
TEST_F(fixture, testname){ CV__PERF_TEST_BODY_IMPL(#fixture "_" #testname); }\
}\
void PERF_PROXY_NAMESPACE_NAME_(fixture, testname)::fixture::PerfTestBody()
// Defines a parametrized performance test.
//
// @Note PERF_TEST_P() below deviates from original Google Test behavior by instantiating the tests itself - the original TEST_P() performs no instantiation
// This PERF_TEST_P_() macro keeps the original behavior and is intended for use with a separate INSTANTIATE_TEST_CASE_P macro
#define PERF_TEST_P_(test_case_name, test_name) CV__TEST_P(test_case_name, test_name, PerfTestBody, CV__PERF_TEST_BODY_IMPL)
// Defines a parametrized performance test.
//
// @Note The original TEST_P() macro doesn't instantiate tests with parameters. To keep the original usage, use the PERF_TEST_P_() macro
//
// The first parameter is the name of the test fixture class, which
// also doubles as the test case name. The second parameter is the
// name of the test within the test case.
//
// The user should put his test code between braces after using this
// macro. Example:
//
// typedef ::perf::TestBaseWithParam<cv::Size> FooTest;
//
// PERF_TEST_P(FooTest, DoTestingRight, ::testing::Values(::perf::szVGA, ::perf::sz720p)) {
// cv::Mat b(GetParam(), CV_8U, cv::Scalar(10));
// cv::Mat a(GetParam(), CV_8U, cv::Scalar(20));
// cv::Mat c(GetParam(), CV_8U, cv::Scalar(0));
//
// declare.in(a, b).out(c).time(0.5);
//
// TEST_CYCLE() cv::add(a, b, c);
//
// SANITY_CHECK(c);
// }
#define PERF_TEST_P(fixture, name, params) \
class fixture##_##name : public fixture {\
public:\
fixture##_##name() {}\
protected:\
virtual void PerfTestBody();\
};\
CV__TEST_P(fixture##_##name, name, PerfTestBodyDummy, CV__PERF_TEST_BODY_IMPL){} \
INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
void fixture##_##name::PerfTestBody()
#ifndef __CV_TEST_EXEC_ARGS
#if defined(_MSC_VER) && (_MSC_VER <= 1400)
#define __CV_TEST_EXEC_ARGS(...) \
while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
#else
#define __CV_TEST_EXEC_ARGS(...) \
__VA_ARGS__;
#endif
#endif
#define CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, ...) \
CV_TRACE_FUNCTION(); \
{ CV_TRACE_REGION("INIT"); \
::perf::Regression::Init(#modulename); \
::perf::TestBase::Init(std::vector<std::string>(impls, impls + sizeof impls / sizeof *impls), \
argc, argv); \
::testing::InitGoogleTest(&argc, argv); \
::testing::UnitTest::GetInstance()->listeners().Append(new cvtest::SystemInfoCollector); \
::testing::Test::RecordProperty("cv_module_name", #modulename); \
::perf::TestBase::RecordRunParameters(); \
__CV_TEST_EXEC_ARGS(__VA_ARGS__) \
} \
return RUN_ALL_TESTS();
// impls must be an array, not a pointer; "plain" should always be one of the implementations
#define CV_PERF_TEST_MAIN_WITH_IMPLS(modulename, impls, ...) \
int main(int argc, char **argv)\
{\
CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, __VA_ARGS__)\
}
#define CV_PERF_TEST_MAIN(modulename, ...) \
int main(int argc, char **argv)\
{\
const char * plain_only[] = { "plain" };\
CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only, __VA_ARGS__)\
}
//! deprecated
#define TEST_CYCLE_N(n) for(declare.iterations(n); next() && startTimer(); stopTimer())
//! deprecated
#define TEST_CYCLE() for(; next() && startTimer(); stopTimer())
//! deprecated
#define TEST_CYCLE_MULTIRUN(runsNum) for(declare.runs(runsNum); next() && startTimer(); stopTimer()) for(int r = 0; r < runsNum; ++r)
#define PERF_SAMPLE_BEGIN() \
for(; next() && startTimer(); stopTimer()) \
{ \
CV_TRACE_REGION("iteration");
#define PERF_SAMPLE_END() \
}
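// Measurement-loop sketch ('a', 'b', 'c' are Mats prepared by the test): each
// iteration is timed individually, and next() applies the configured limits.
//   PERF_SAMPLE_BEGIN()
//       cv::add(a, b, c);
//   PERF_SAMPLE_END()
//   SANITY_CHECK_NOTHING();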
namespace perf
{
namespace comparators
{
template<typename T>
struct RectLess_
{
bool operator()(const cv::Rect_<T>& r1, const cv::Rect_<T>& r2) const
{
return r1.x < r2.x ||
(r1.x == r2.x && r1.y < r2.y) ||
(r1.x == r2.x && r1.y == r2.y && r1.width < r2.width) ||
(r1.x == r2.x && r1.y == r2.y && r1.width == r2.width && r1.height < r2.height);
}
};
typedef RectLess_<int> RectLess;
struct KeypointGreater
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
if (kp1.response > kp2.response) return true;
if (kp1.response < kp2.response) return false;
if (kp1.size > kp2.size) return true;
if (kp1.size < kp2.size) return false;
if (kp1.octave > kp2.octave) return true;
if (kp1.octave < kp2.octave) return false;
if (kp1.pt.y < kp2.pt.y) return false;
if (kp1.pt.y > kp2.pt.y) return true;
return kp1.pt.x < kp2.pt.x;
}
};
} //namespace comparators
void sort(std::vector<cv::KeyPoint>& pts, cv::InputOutputArray descriptors);
} //namespace perf
#endif //OPENCV_TS_PERF_HPP

modules/ts/misc/chart.py Normal file
@@ -0,0 +1,237 @@
#!/usr/bin/env python
import testlog_parser, sys, os, xml, re
from table_formatter import *
from optparse import OptionParser
cvsize_re = re.compile(r"^\d+x\d+$")
cvtype_re = re.compile(r"^(CV_)(8U|8S|16U|16S|32S|32F|64F)(C\d{1,3})?$")
def keyselector(a):
if cvsize_re.match(a):
size = [int(d) for d in a.split('x')]
return size[0] * size[1]
elif cvtype_re.match(a):
if a.startswith("CV_"):
a = a[3:]
depth = 7
if a[0] == '8':
depth = (0, 1) [a[1] == 'S']
elif a[0] == '1':
depth = (2, 3) [a[2] == 'S']
elif a[2] == 'S':
depth = 4
elif a[0] == '3':
depth = 5
elif a[0] == '6':
depth = 6
cidx = a.find('C')
if cidx < 0:
channels = 1
else:
channels = int(a[a.index('C') + 1:])
#return (depth & 7) + ((channels - 1) << 3)
return ((channels-1) & 511) + (depth << 9)
return a
convert = lambda text: int(text) if text.isdigit() else text
alphanum_keyselector = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(keyselector(key))) ]
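# Examples of the sort keys produced above:
#   keyselector("640x480") -> 307200 (the area), so sizes order by pixel count
#   keyselector("CV_8UC3") -> channels in the low bits, depth shifted high,
#                             so types group by depth before channel count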
def getValueParams(test):
param = test.get("value_param")
if not param:
return []
if param.startswith("("):
param = param[1:]
if param.endswith(")"):
param = param[:-1]
args = []
prev_pos = 0
start = 0
balance = 0
while True:
idx = param.find(",", prev_pos)
if idx < 0:
break
idxlb = param.find("(", prev_pos, idx)
while idxlb >= 0:
balance += 1
idxlb = param.find("(", idxlb+1, idx)
idxrb = param.find(")", prev_pos, idx)
while idxrb >= 0:
balance -= 1
idxrb = param.find(")", idxrb+1, idx)
assert(balance >= 0)
if balance == 0:
args.append(param[start:idx].strip())
start = idx + 1
prev_pos = idx + 1
args.append(param[start:].strip())
return args
#return [p.strip() for p in param.split(",")]
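# Example: a value_param of "(640x480, CV_8UC1, (1, 3))" splits into
# ['640x480', 'CV_8UC1', '(1, 3)'] -- the balance counter keeps commas inside
# nested parentheses from starting a new argument.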
def nextPermutation(indexes, lists, x, y):
idx = len(indexes)-1
while idx >= 0:
while idx == x or idx == y:
idx -= 1
if idx < 0:
return False
v = indexes[idx] + 1
if v < len(lists[idx]):
indexes[idx] = v
return True
else:
indexes[idx] = 0
idx -= 1
return False
def getTestWideName(sname, indexes, lists, x, y):
name = sname + "::("
for i in range(len(indexes)):
if i > 0:
name += ", "
if i == x:
name += "X"
elif i == y:
name += "Y"
else:
name += lists[i][indexes[i]]
return str(name + ")")
def getTest(stests, x, y, row, col):
for pair in stests:
if pair[1][x] == row and pair[1][y] == col:
return pair[0]
return None
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
parser.add_option("-x", "", dest="x", help="argument number for rows", metavar="ROW", default=1)
parser.add_option("-y", "", dest="y", help="argument number for columns", metavar="COL", default=0)
parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
(options, args) = parser.parse_args()
if len(args) != 1:
print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml"
exit(1)
options.generateHtml = detectHtmlOutputType(options.format)
if options.metric not in metrix_table:
options.metric = "gmean"
if options.metric.endswith("%"):
options.metric = options.metric[:-1]
getter = metrix_table[options.metric][1]
tests = testlog_parser.parseLogFile(args[0])
if options.filter:
expr = re.compile(options.filter)
tests = [(t,getValueParams(t)) for t in tests if expr.search(str(t))]
else:
tests = [(t,getValueParams(t)) for t in tests]
args[0] = os.path.basename(args[0])
if not tests:
print >> sys.stderr, "Error - no tests matched"
exit(1)
argsnum = len(tests[0][1])
sname = tests[0][0].shortName()
arglists = []
for i in range(argsnum):
arglists.append({})
names = set()
names1 = set()
for pair in tests:
sn = pair[0].shortName()
if len(pair[1]) > 1:
names.add(sn)
else:
names1.add(sn)
if sn == sname:
if len(pair[1]) != argsnum:
print >> sys.stderr, "Error - unable to create chart tables for functions having different argument numbers"
sys.exit(1)
for i in range(argsnum):
arglists[i][pair[1][i]] = 1
if names1 or len(names) != 1:
print >> sys.stderr, "Error - unable to create tables for functions from different test suites:"
i = 1
for name in sorted(names):
print >> sys.stderr, "%4s: %s" % (i, name)
i += 1
if names1:
print >> sys.stderr, "Other suites in this log (cannot be chosen):"
for name in sorted(names1):
print >> sys.stderr, "%4s: %s" % (i, name)
i += 1
sys.exit(1)
if argsnum < 2:
print >> sys.stderr, "Error - tests from %s have less than 2 parameters" % sname
exit(1)
for i in range(argsnum):
arglists[i] = sorted([str(key) for key in arglists[i].iterkeys()], key=alphanum_keyselector)
if options.generateHtml and options.format != "moinwiki":
htmlPrintHeader(sys.stdout, "Report %s for %s" % (args[0], sname))
indexes = [0] * argsnum
x = int(options.x)
y = int(options.y)
if x == y or x < 0 or y < 0 or x >= argsnum or y >= argsnum:
x = 1
y = 0
while True:
stests = []
for pair in tests:
t = pair[0]
v = pair[1]
for i in range(argsnum):
if i != x and i != y:
if v[i] != arglists[i][indexes[i]]:
t = None
break
if t:
stests.append(pair)
tbl = table(metrix_table[options.metric][0] + " for\n" + getTestWideName(sname, indexes, arglists, x, y))
tbl.newColumn("x", "X\\Y")
for col in arglists[y]:
tbl.newColumn(col, col, align="center")
for row in arglists[x]:
tbl.newRow()
tbl.newCell("x", row)
for col in arglists[y]:
case = getTest(stests, x, y, row, col)
if case:
status = case.get("status")
if status != "run":
tbl.newCell(col, status, color = "red")
else:
val = getter(case, None, options.units)
if isinstance(val, float):
tbl.newCell(col, "%.2f %s" % (val, options.units), val)
else:
tbl.newCell(col, val, val)
else:
tbl.newCell(col, "-")
if options.generateHtml:
tbl.htmlPrintTable(sys.stdout, options.format == "moinwiki")
else:
tbl.consolePrintTable(sys.stdout)
if not nextPermutation(indexes, arglists, x, y):
break
if options.generateHtml and options.format != "moinwiki":
htmlPrintFooter(sys.stdout)

modules/ts/misc/color.py Normal file
@@ -0,0 +1,386 @@
#!/usr/bin/env python
import math, os, sys
webcolors = {
"indianred": "#cd5c5c",
"lightcoral": "#f08080",
"salmon": "#fa8072",
"darksalmon": "#e9967a",
"lightsalmon": "#ffa07a",
"red": "#ff0000",
"crimson": "#dc143c",
"firebrick": "#b22222",
"darkred": "#8b0000",
"pink": "#ffc0cb",
"lightpink": "#ffb6c1",
"hotpink": "#ff69b4",
"deeppink": "#ff1493",
"mediumvioletred": "#c71585",
"palevioletred": "#db7093",
"lightsalmon": "#ffa07a",
"coral": "#ff7f50",
"tomato": "#ff6347",
"orangered": "#ff4500",
"darkorange": "#ff8c00",
"orange": "#ffa500",
"gold": "#ffd700",
"yellow": "#ffff00",
"lightyellow": "#ffffe0",
"lemonchiffon": "#fffacd",
"lightgoldenrodyellow": "#fafad2",
"papayawhip": "#ffefd5",
"moccasin": "#ffe4b5",
"peachpuff": "#ffdab9",
"palegoldenrod": "#eee8aa",
"khaki": "#f0e68c",
"darkkhaki": "#bdb76b",
"lavender": "#e6e6fa",
"thistle": "#d8bfd8",
"plum": "#dda0dd",
"violet": "#ee82ee",
"orchid": "#da70d6",
"fuchsia": "#ff00ff",
"magenta": "#ff00ff",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"blueviolet": "#8a2be2",
"darkviolet": "#9400d3",
"darkorchid": "#9932cc",
"darkmagenta": "#8b008b",
"purple": "#800080",
"indigo": "#4b0082",
"darkslateblue": "#483d8b",
"slateblue": "#6a5acd",
"mediumslateblue": "#7b68ee",
"greenyellow": "#adff2f",
"chartreuse": "#7fff00",
"lawngreen": "#7cfc00",
"lime": "#00ff00",
"limegreen": "#32cd32",
"palegreen": "#98fb98",
"lightgreen": "#90ee90",
"mediumspringgreen": "#00fa9a",
"springgreen": "#00ff7f",
"mediumseagreen": "#3cb371",
"seagreen": "#2e8b57",
"forestgreen": "#228b22",
"green": "#008000",
"darkgreen": "#006400",
"yellowgreen": "#9acd32",
"olivedrab": "#6b8e23",
"olive": "#808000",
"darkolivegreen": "#556b2f",
"mediumaquamarine": "#66cdaa",
"darkseagreen": "#8fbc8f",
"lightseagreen": "#20b2aa",
"darkcyan": "#008b8b",
"teal": "#008080",
"aqua": "#00ffff",
"cyan": "#00ffff",
"lightcyan": "#e0ffff",
"paleturquoise": "#afeeee",
"aquamarine": "#7fffd4",
"turquoise": "#40e0d0",
"mediumturquoise": "#48d1cc",
"darkturquoise": "#00ced1",
"cadetblue": "#5f9ea0",
"steelblue": "#4682b4",
"lightsteelblue": "#b0c4de",
"powderblue": "#b0e0e6",
"lightblue": "#add8e6",
"skyblue": "#87ceeb",
"lightskyblue": "#87cefa",
"deepskyblue": "#00bfff",
"dodgerblue": "#1e90ff",
"cornflowerblue": "#6495ed",
"royalblue": "#4169e1",
"blue": "#0000ff",
"mediumblue": "#0000cd",
"darkblue": "#00008b",
"navy": "#000080",
"midnightblue": "#191970",
"cornsilk": "#fff8dc",
"blanchedalmond": "#ffebcd",
"bisque": "#ffe4c4",
"navajowhite": "#ffdead",
"wheat": "#f5deb3",
"burlywood": "#deb887",
"tan": "#d2b48c",
"rosybrown": "#bc8f8f",
"sandybrown": "#f4a460",
"goldenrod": "#daa520",
"darkgoldenrod": "#b8860b",
"peru": "#cd853f",
"chocolate": "#d2691e",
"saddlebrown": "#8b4513",
"sienna": "#a0522d",
"brown": "#a52a2a",
"maroon": "#800000",
"white": "#ffffff",
"snow": "#fffafa",
"honeydew": "#f0fff0",
"mintcream": "#f5fffa",
"azure": "#f0ffff",
"aliceblue": "#f0f8ff",
"ghostwhite": "#f8f8ff",
"whitesmoke": "#f5f5f5",
"seashell": "#fff5ee",
"beige": "#f5f5dc",
"oldlace": "#fdf5e6",
"floralwhite": "#fffaf0",
"ivory": "#fffff0",
"antiquewhite": "#faebd7",
"linen": "#faf0e6",
"lavenderblush": "#fff0f5",
"mistyrose": "#ffe4e1",
"gainsboro": "#dcdcdc",
"lightgrey": "#d3d3d3",
"silver": "#c0c0c0",
"darkgray": "#a9a9a9",
"gray": "#808080",
"dimgray": "#696969",
"lightslategray": "#778899",
"slategray": "#708090",
"darkslategray": "#2f4f4f",
"black": "#000000",
}
if os.name == "nt":
consoleColors = [
"#000000", #{ 0, 0, 0 },//0 - black
"#000080", #{ 0, 0, 128 },//1 - navy
"#008000", #{ 0, 128, 0 },//2 - green
"#008080", #{ 0, 128, 128 },//3 - teal
"#800000", #{ 128, 0, 0 },//4 - maroon
"#800080", #{ 128, 0, 128 },//5 - purple
"#808000", #{ 128, 128, 0 },//6 - olive
"#C0C0C0", #{ 192, 192, 192 },//7 - silver
"#808080", #{ 128, 128, 128 },//8 - gray
"#0000FF", #{ 0, 0, 255 },//9 - blue
"#00FF00", #{ 0, 255, 0 },//a - lime
"#00FFFF", #{ 0, 255, 255 },//b - cyan
"#FF0000", #{ 255, 0, 0 },//c - red
"#FF00FF", #{ 255, 0, 255 },//d - magenta
"#FFFF00", #{ 255, 255, 0 },//e - yellow
"#FFFFFF", #{ 255, 255, 255 } //f - white
]
else:
consoleColors = [
"#2e3436",
"#cc0000",
"#4e9a06",
"#c4a000",
"#3465a4",
"#75507b",
"#06989a",
"#d3d7cf",
"#ffffff",
"#555753",
"#ef2929",
"#8ae234",
"#fce94f",
"#729fcf",
"#ad7fa8",
"#34e2e2",
"#eeeeec",
]
def RGB2LAB(r,g,b):
if max(r,g,b):
r /= 255.
g /= 255.
b /= 255.
X = (0.412453 * r + 0.357580 * g + 0.180423 * b) / 0.950456
Y = (0.212671 * r + 0.715160 * g + 0.072169 * b)
Z = (0.019334 * r + 0.119193 * g + 0.950227 * b) / 1.088754
#[X * 0.950456] [0.412453 0.357580 0.180423] [R]
#[Y ] = [0.212671 0.715160 0.072169] * [G]
#[Z * 1.088754] [0.019334 0.119193 0.950227] [B]
T = 0.008856 #threshold
if X > T:
fX = math.pow(X, 1./3.)
else:
fX = 7.787 * X + 16./116.
# Compute L
if Y > T:
Y3 = math.pow(Y, 1./3.)
fY = Y3
L = 116. * Y3 - 16.0
else:
fY = 7.787 * Y + 16./116.
L = 903.3 * Y
if Z > T:
fZ = math.pow(Z, 1./3.)
else:
fZ = 7.787 * Z + 16./116.
# Compute a and b
a = 500. * (fX - fY)
b = 200. * (fY - fZ)
return (L,a,b)
def colorDistance(r1,g1,b1 = None, r2 = None, g2 = None,b2 = None):
if type(r1) == tuple and type(g1) == tuple and b1 is None and r2 is None and g2 is None and b2 is None:
(l1,a1,b1) = RGB2LAB(*r1)
(l2,a2,b2) = RGB2LAB(*g1)
else:
(l1,a1,b1) = RGB2LAB(r1,g1,b1)
(l2,a2,b2) = RGB2LAB(r2,g2,b2)
#CIE94
dl = l1-l2
C1 = math.sqrt(a1*a1 + b1*b1)
C2 = math.sqrt(a2*a2 + b2*b2)
dC = C1 - C2
da = a1-a2
db = b1-b2
dH = math.sqrt(max(0, da*da + db*db - dC*dC))
Kl = 1
K1 = 0.045
K2 = 0.015
s1 = dl/Kl
s2 = dC/(1. + K1 * C1)
s3 = dH/(1. + K2 * C1)
return math.sqrt(s1*s1 + s2*s2 + s3*s3)
def parseHexColor(col):
if (len(col) != 4 and len(col) != 7) or not col.startswith("#"):
return (0,0,0)
if len(col) == 4:
r = col[1]*2
g = col[2]*2
b = col[3]*2
else:
r = col[1:3]
g = col[3:5]
b = col[5:7]
return (int(r,16), int(g,16), int(b,16))
def getColor(col):
if isinstance(col, str):
if col.lower() in webcolors:
return parseHexColor(webcolors[col.lower()])
else:
return parseHexColor(col)
else:
return col
def getNearestConsoleColor(col):
color = getColor(col)
minidx = 0
mindist = colorDistance(color, getColor(consoleColors[0]))
for i in range(len(consoleColors)):
dist = colorDistance(color, getColor(consoleColors[i]))
if dist < mindist:
mindist = dist
minidx = i
return minidx
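# Example: getNearestConsoleColor("#ff4500") returns the index of the palette
# entry perceptually closest to orangered under the CIE94 distance above.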
if os.name == 'nt':
import msvcrt
from ctypes import windll, Structure, c_short, c_ushort, byref
SHORT = c_short
WORD = c_ushort
class COORD(Structure):
_fields_ = [
("X", SHORT),
("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD)]
class winConsoleColorizer(object):
def __init__(self, stream):
self.handle = msvcrt.get_osfhandle(stream.fileno())
self.default_attrs = 7  # self.get_text_attr()
self.stream = stream
def get_text_attr(self):
csbi = CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(self.handle, byref(csbi))
return csbi.wAttributes
def set_text_attr(self, color):
windll.kernel32.SetConsoleTextAttribute(self.handle, color)
def write(self, *text, **attrs):
if not text:
return
color = attrs.get("color", None)
if color:
col = getNearestConsoleColor(color)
self.stream.flush()
self.set_text_attr(col)
self.stream.write(" ".join([str(t) for t in text]))
if color:
self.stream.flush()
self.set_text_attr(self.default_attrs)
class dummyColorizer(object):
def __init__(self, stream):
self.stream = stream
def write(self, *text, **attrs):
if text:
self.stream.write(" ".join([str(t) for t in text]))
class asciiSeqColorizer(object):
RESET_SEQ = "\033[0m"
#BOLD_SEQ = "\033[1m"
ITALIC_SEQ = "\033[3m"
UNDERLINE_SEQ = "\033[4m"
STRIKEOUT_SEQ = "\033[9m"
COLOR_SEQ0 = "\033[00;%dm" #dark
COLOR_SEQ1 = "\033[01;%dm" #bold and light
def __init__(self, stream):
self.stream = stream
def get_seq(self, code):
if code > 8:
return self.__class__.COLOR_SEQ1 % (30 + code - 9)
else:
return self.__class__.COLOR_SEQ0 % (30 + code)
def write(self, *text, **attrs):
if not text:
return
color = attrs.get("color", None)
if color:
col = getNearestConsoleColor(color)
self.stream.write(self.get_seq(col))
self.stream.write(" ".join([str(t) for t in text]))
if color:
self.stream.write(self.__class__.RESET_SEQ)
def getColorizer(stream):
if stream.isatty():
if os.name == "nt":
return winConsoleColorizer(stream)
else:
return asciiSeqColorizer(stream)
else:
return dummyColorizer(stream)

@@ -0,0 +1,45 @@
#!/usr/bin/env python
from optparse import OptionParser
import glob, sys, os, re
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-o", "--output", dest="output", help="output file name", metavar="FILENAME", default=None)
(options, args) = parser.parse_args()
if not options.output:
sys.stderr.write("Error: output file name is not provided\n")
exit(-1)
files = []
for arg in args:
if ("*" in arg) or ("?" in arg):
files.extend([os.path.abspath(f) for f in glob.glob(arg)])
else:
files.append(os.path.abspath(arg))
html = None
for f in sorted(files):
try:
fobj = open(f)
if not fobj:
continue
text = fobj.read()
if not html:
html = text
continue
idx1 = text.find("<tbody>") + len("<tbody>")
idx2 = html.rfind("</tbody>")
html = html[:idx2] + re.sub(r"[ \t\n\r]+", " ", text[idx1:])
except:
pass
if html:
idx1 = html.find("<title>") + len("<title>")
idx2 = html.find("</title>")
html = html[:idx1] + "OpenCV performance testing report" + html[idx2:]
open(options.output, "w").write(html)
else:
sys.stderr.write("Error: no input data\n")
exit(-1)

@@ -0,0 +1,160 @@
#!/usr/bin/env python
from __future__ import print_function
import testlog_parser, sys, os, xml, glob, re
from table_formatter import *
from optparse import OptionParser
from operator import itemgetter, attrgetter
from summary import getSetName, alphanum_keyselector
import re
if __name__ == "__main__":
usage = "%prog <log_name>.xml [...]"
parser = OptionParser(usage = usage)
parser.add_option("-o", "--output", dest = "format",
help = "output results in text format (can be 'txt', 'html' or 'auto' - default)",
metavar = 'FMT', default = 'auto')
parser.add_option("--failed-only", action = "store_true", dest = "failedOnly",
help = "print only failed tests", default = False)
(options, args) = parser.parse_args()
options.generateHtml = detectHtmlOutputType(options.format)
files = []
testsuits = [] # list of dicts: module, testsuite name, time, number of tests, failed count
overall_time = 0
seen = set()
for arg in args:
if ("*" in arg) or ("?" in arg):
flist = [os.path.abspath(f) for f in glob.glob(arg)]
flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
files.extend([ x for x in flist if x not in seen and not seen.add(x)])
else:
fname = os.path.abspath(arg)
if fname not in seen and not seen.add(fname):
files.append(fname)
file = os.path.abspath(fname)
if not os.path.isfile(file):
sys.stderr.write("IOError reading \"" + file + "\" - no such file" + os.linesep)
parser.print_help()
exit(0)
fname = os.path.basename(fname)
find_module_name = re.search(r'([^_]*)', fname)
module_name = find_module_name.group(0)
test_sets = []
try:
tests = testlog_parser.parseLogFile(file)
if tests:
test_sets.append((os.path.basename(file), tests))
except IOError as err:
sys.stderr.write("IOError reading \"" + file + "\" - " + str(err) + os.linesep)
except xml.parsers.expat.ExpatError as err:
sys.stderr.write("ExpatError reading \"" + file + "\" - " + str(err) + os.linesep)
if not test_sets:
continue
# find matches
setsCount = len(test_sets)
test_cases = {}
name_extractor = lambda name: str(name)
for i in range(setsCount):
for case in test_sets[i][1]:
name = name_extractor(case)
if name not in test_cases:
test_cases[name] = [None] * setsCount
test_cases[name][i] = case
prevGroupName = None
suit_time = 0
suit_num = 0
fails_num = 0
for name in sorted(test_cases.keys(), key=alphanum_keyselector):
cases = test_cases[name]
groupName = next(c for c in cases if c).shortName()
if groupName != prevGroupName:
            if prevGroupName is not None:
suit_time = suit_time/60 #from seconds to minutes
testsuits.append({'module': module_name, 'name': prevGroupName, \
'time': suit_time, 'num': suit_num, 'failed': fails_num})
overall_time += suit_time
suit_time = 0
suit_num = 0
fails_num = 0
prevGroupName = groupName
for i in range(setsCount):
case = cases[i]
if not case is None:
suit_num += 1
if case.get('status') == 'run':
suit_time += case.get('time')
if case.get('status') == 'failed':
fails_num += 1
# last testsuit processing
suit_time = suit_time/60
testsuits.append({'module': module_name, 'name': prevGroupName, \
'time': suit_time, 'num': suit_num, 'failed': fails_num})
overall_time += suit_time
if len(testsuits)==0:
exit(0)
tbl = table()
rows = 0
if not options.failedOnly:
tbl.newColumn('module', 'Module', align = 'left', cssclass = 'col_name')
tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
tbl.newColumn('time', 'Time (min)', align = 'center', cssclass = 'col_name')
tbl.newColumn('num', 'Num of tests', align = 'center', cssclass = 'col_name')
tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')
# rows
for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
tbl.newRow()
tbl.newCell('module', suit['module'])
tbl.newCell('name', suit['name'])
tbl.newCell('time', formatValue(suit['time'], '', ''), suit['time'])
tbl.newCell('num', suit['num'])
if (suit['failed'] != 0):
tbl.newCell('failed', suit['failed'])
else:
tbl.newCell('failed', ' ')
rows += 1
else:
tbl.newColumn('module', 'Module', align = 'left', cssclass = 'col_name')
tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')
# rows
for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
if (suit['failed'] != 0):
tbl.newRow()
tbl.newCell('module', suit['module'])
tbl.newCell('name', suit['name'])
tbl.newCell('failed', suit['failed'])
rows += 1
# output table
if rows:
if options.generateHtml:
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
if not options.failedOnly:
print('\nOverall time: %.2f min\n' % overall_time)
tbl.consolePrintTable(sys.stdout)
print(2 * '\n')

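The module name is recovered from each log file name as the run of characters before the first underscore; a quick illustration with a made-up file name:

import re
module_name = re.search(r'([^_]*)', 'core_20220506.xml').group(0)
print(module_name)  # core (a name with no underscore is returned whole)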
modules/ts/misc/report.py Normal file

@@ -0,0 +1,103 @@
#!/usr/bin/env python
import testlog_parser, sys, os, xml, re, glob
from table_formatter import *
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
parser.add_option("-c", "--columns", dest="columns", help="comma-separated list of columns to show", metavar="COLS", default="")
parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
(options, args) = parser.parse_args()
if len(args) < 1:
print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml"
exit(0)
options.generateHtml = detectHtmlOutputType(options.format)
# expand wildcards and filter duplicates
files = []
files1 = []
for arg in args:
if ("*" in arg) or ("?" in arg):
files1.extend([os.path.abspath(f) for f in glob.glob(arg)])
else:
files.append(os.path.abspath(arg))
seen = set()
files = [ x for x in files if x not in seen and not seen.add(x)]
files.extend((set(files1) - set(files)))
args = files
# load test data
tests = []
files = []
for arg in set(args):
try:
cases = testlog_parser.parseLogFile(arg)
if cases:
files.append(os.path.basename(arg))
tests.extend(cases)
except:
pass
if options.filter:
expr = re.compile(options.filter)
tests = [t for t in tests if expr.search(str(t))]
tbl = table(", ".join(files))
if options.columns:
metrics = [s.strip() for s in options.columns.split(",")]
metrics = [m for m in metrics if m and not m.endswith("%") and m in metrix_table]
else:
metrics = None
if not metrics:
metrics = ["name", "samples", "outliers", "min", "median", "gmean", "mean", "stddev"]
if "name" not in metrics:
metrics.insert(0, "name")
for m in metrics:
if m == "name":
tbl.newColumn(m, metrix_table[m][0])
else:
tbl.newColumn(m, metrix_table[m][0], align = "center")
needNewRow = True
for case in sorted(tests, key=lambda x: str(x)):
if needNewRow:
tbl.newRow()
if not options.showall:
needNewRow = False
status = case.get("status")
if status != "run":
if status != "notrun":
needNewRow = True
for m in metrics:
if m == "name":
tbl.newCell(m, str(case))
else:
tbl.newCell(m, status, color = "red")
else:
needNewRow = True
for m in metrics:
val = metrix_table[m][1](case, None, options.units)
if isinstance(val, float):
tbl.newCell(m, "%.2f %s" % (val, options.units), val)
else:
tbl.newCell(m, val, val)
if not needNewRow:
tbl.trimLastRow()
# output table
if options.generateHtml:
if options.format == "moinwiki":
tbl.htmlPrintTable(sys.stdout, True)
else:
htmlPrintHeader(sys.stdout, "Report %s tests from %s" % (len(tests), ", ".join(files)))
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
tbl.consolePrintTable(sys.stdout)

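Both report.py above and summary.py below dedupe expanded wildcards with a compact, order-preserving idiom that relies on set.add() returning None (which is falsy); in isolation it behaves like this:

seen = set()
items = ['a.xml', 'b.xml', 'a.xml', 'c.xml']
unique = [x for x in items if x not in seen and not seen.add(x)]
print(unique)  # ['a.xml', 'b.xml', 'c.xml'], order kept, duplicates dropped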
modules/ts/misc/run.py Normal file

@@ -0,0 +1,142 @@
#!/usr/bin/env python
import os
import argparse
import logging
import datetime
from run_utils import Err, CMakeCache, log, execute
from run_suite import TestSuite
from run_android import AndroidTestSuite
epilog = '''
NOTE:
Additional options starting with "--gtest_" and "--perf_" will be passed directly to the test executables.
'''
if __name__ == "__main__":
# log.basicConfig(format='[%(levelname)s] %(message)s', level = log.DEBUG)
# log.basicConfig(format='[%(levelname)s] %(message)s', level = log.INFO)
parser = argparse.ArgumentParser(
description='OpenCV test runner script',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("build_path", nargs='?', default=".", help="Path to build directory (should contain CMakeCache.txt, default is current) or to directory with tests (all platform checks will be disabled in this case)")
parser.add_argument("-t", "--tests", metavar="MODULES", default="", help="Comma-separated list of modules to test (example: -t core,imgproc,java)")
parser.add_argument("-b", "--blacklist", metavar="MODULES", default="", help="Comma-separated list of modules to exclude from test (example: -b java)")
parser.add_argument("-a", "--accuracy", action="store_true", default=False, help="Look for accuracy tests instead of performance tests")
parser.add_argument("--check", action="store_true", default=False, help="Shortcut for '--perf_min_samples=1 --perf_force_samples=1'")
parser.add_argument("-w", "--cwd", metavar="PATH", default=".", help="Working directory for tests (default is current)")
parser.add_argument("--list", action="store_true", default=False, help="List available tests (executables)")
parser.add_argument("--list_short", action="store_true", default=False, help="List available tests (aliases)")
parser.add_argument("--list_short_main", action="store_true", default=False, help="List available tests (main repository, aliases)")
parser.add_argument("--configuration", metavar="CFG", default=None, help="Force Debug or Release configuration (for Visual Studio and Java tests build)")
parser.add_argument("-n", "--dry_run", action="store_true", help="Do not run the tests")
parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Print more debug information")
# Valgrind
parser.add_argument("--valgrind", action="store_true", default=False, help="Run C++ tests in valgrind")
parser.add_argument("--valgrind_supp", metavar="FILE", action='append', help="Path to valgrind suppression file (example: --valgrind_supp opencv/platforms/scripts/valgrind.supp)")
parser.add_argument("--valgrind_opt", metavar="OPT", action="append", default=[], help="Add command line option to valgrind (example: --valgrind_opt=--leak-check=full)")
# QEMU
parser.add_argument("--qemu", default="", help="Specify qemu binary and base parameters")
# Android
parser.add_argument("--android", action="store_true", default=False, help="Android: force all tests to run on device")
parser.add_argument("--android_sdk", metavar="PATH", help="Android: path to SDK to use adb and aapt tools")
parser.add_argument("--android_test_data_path", metavar="PATH", default="/sdcard/opencv_testdata/", help="Android: path to testdata on device")
parser.add_argument("--android_env", action='append', help="Android: add environment variable (NAME=VALUE)")
parser.add_argument("--android_propagate_opencv_env", action="store_true", default=False, help="Android: propagate OPENCV* environment variables")
parser.add_argument("--serial", metavar="serial number", default="", help="Android: directs command to the USB device or emulator with the given serial number")
parser.add_argument("--package", metavar="package", default="", help="Java: run JUnit tests for specified module or Android package")
parser.add_argument("--trace", action="store_true", default=False, help="Trace: enable OpenCV tracing")
parser.add_argument("--trace_dump", metavar="trace_dump", default=-1, help="Trace: dump highlight calls (specify max entries count, 0 - dump all)")
args, other_args = parser.parse_known_args()
log.setLevel(logging.DEBUG if args.verbose else logging.INFO)
test_args = [a for a in other_args if a.startswith("--perf_") or a.startswith("--test_") or a.startswith("--gtest_")]
bad_args = [a for a in other_args if a not in test_args]
if len(bad_args) > 0:
log.error("Error: Bad arguments: %s", bad_args)
exit(1)
args.mode = "test" if args.accuracy else "perf"
android_env = []
if args.android_env:
android_env.extend([entry.split("=", 1) for entry in args.android_env])
if args.android_propagate_opencv_env:
android_env.extend([entry for entry in os.environ.items() if entry[0].startswith('OPENCV')])
android_env = dict(android_env)
if args.android_test_data_path:
android_env['OPENCV_TEST_DATA_PATH'] = args.android_test_data_path
if args.valgrind:
try:
ver = execute(["valgrind", "--version"], silent=True)
log.debug("Using %s", ver)
except OSError as e:
log.error("Failed to run valgrind: %s", e)
exit(1)
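    # NOTE: build_path is a single string here (nargs='?'), so this length
    # check compares its character count; it reads like a leftover from
    # multi-path support and strips --gtest_output for any path longer
    # than one character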
if len(args.build_path) != 1:
test_args = [a for a in test_args if not a.startswith("--gtest_output=")]
if args.check:
if not [a for a in test_args if a.startswith("--perf_min_samples=")]:
test_args.extend(["--perf_min_samples=1"])
if not [a for a in test_args if a.startswith("--perf_force_samples=")]:
test_args.extend(["--perf_force_samples=1"])
if not [a for a in test_args if a.startswith("--perf_verify_sanity")]:
test_args.extend(["--perf_verify_sanity"])
if bool(os.environ.get('BUILD_PRECOMMIT', None)):
test_args.extend(["--skip_unstable=1"])
ret = 0
logs = []
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
path = args.build_path
try:
if not os.path.isdir(path):
raise Err("Not a directory (should contain CMakeCache.txt ot test executables)")
cache = CMakeCache(args.configuration)
fname = os.path.join(path, "CMakeCache.txt")
if os.path.isfile(fname):
log.debug("Reading cmake cache file: %s", fname)
cache.read(path, fname)
else:
log.debug("Assuming folder contains tests: %s", path)
cache.setDummy(path)
if args.android or cache.getOS() == "android":
log.debug("Creating Android test runner")
suite = AndroidTestSuite(args, cache, stamp, android_env)
else:
log.debug("Creating native test runner")
suite = TestSuite(args, cache, stamp)
if args.list or args.list_short or args.list_short_main:
suite.listTests(args.list_short or args.list_short_main, args.list_short_main)
else:
log.debug("Running tests in '%s', working dir: '%s'", path, args.cwd)
def parseTests(s):
return [o.strip() for o in s.split(",") if o]
logs, ret = suite.runTests(parseTests(args.tests), parseTests(args.blacklist), args.cwd, test_args)
except Err as e:
log.error("ERROR: test path '%s' ==> %s", path, e.msg)
ret = -1
if logs:
log.warning("Collected: %s", logs)
if ret != 0:
log.error("ERROR: some tests have failed")
exit(ret)

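Arguments unknown to argparse are partitioned into pass-through test options and rejects; a standalone sketch of the split performed above:

other_args = ['--gtest_filter=*Sobel*', '--perf_min_samples=5', '--bogus']
test_args = [a for a in other_args
             if a.startswith('--perf_') or a.startswith('--test_') or a.startswith('--gtest_')]
bad_args = [a for a in other_args if a not in test_args]
print(test_args)  # ['--gtest_filter=*Sobel*', '--perf_min_samples=5']
print(bad_args)   # ['--bogus']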

@@ -0,0 +1,170 @@
#!/usr/bin/env python
import os
import re
import getpass
from run_utils import Err, log, execute, isColorEnabled, hostos
from run_suite import TestSuite
def exe(program):
return program + ".exe" if hostos == 'nt' else program
class ApkInfo:
def __init__(self):
self.pkg_name = None
self.pkg_target = None
self.pkg_runner = None
def forcePackage(self, package):
if package:
if package.startswith("."):
self.pkg_target += package
else:
self.pkg_target = package
class Tool:
def __init__(self):
self.cmd = []
    def run(self, args=[], silent=False):
        cmd = self.cmd[:]
        cmd.extend(args)
        return execute(cmd, silent)
class Adb(Tool):
def __init__(self, sdk_dir):
Tool.__init__(self)
exe_path = os.path.join(sdk_dir, exe("platform-tools/adb"))
if not os.path.isfile(exe_path) or not os.access(exe_path, os.X_OK):
exe_path = None
# fix adb tool location
if not exe_path:
exe_path = "adb"
self.cmd = [exe_path]
def init(self, serial):
# remember current device serial. Needed if another device is connected while this script runs
if not serial:
serial = self.detectSerial()
if serial:
self.cmd.extend(["-s", serial])
def detectSerial(self):
adb_res = self.run(["devices"], silent=True)
        # assume here that a device name may consist of any characters except newline
connected_devices = re.findall(r"^[^\n]+[ \t]+device\r?$", adb_res, re.MULTILINE)
if not connected_devices:
raise Err("Can not find Android device")
elif len(connected_devices) != 1:
raise Err("Too many (%s) devices are connected. Please specify single device using --serial option:\n\n%s", len(connected_devices), adb_res)
else:
return connected_devices[0].split("\t")[0]
def getOSIdentifier(self):
return "Android" + self.run(["shell", "getprop ro.build.version.release"], silent=True).strip()
class Aapt(Tool):
def __init__(self, sdk_dir):
Tool.__init__(self)
aapt_fn = exe("aapt")
aapt = None
for r, ds, fs in os.walk(os.path.join(sdk_dir, 'build-tools')):
if aapt_fn in fs:
aapt = os.path.join(r, aapt_fn)
break
if not aapt:
raise Err("Can not find aapt tool: %s", aapt_fn)
self.cmd = [aapt]
def dump(self, exe):
res = ApkInfo()
output = self.run(["dump", "xmltree", exe, "AndroidManifest.xml"], silent=True)
if not output:
raise Err("Can not dump manifest from %s", exe)
tags = re.split(r"[ ]+E: ", output)
# get package name
manifest_tag = [t for t in tags if t.startswith("manifest ")]
if not manifest_tag:
raise Err("Can not read package name from: %s", exe)
res.pkg_name = re.search(r"^[ ]+A: package=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", manifest_tag[0], flags=re.MULTILINE).group("pkg")
# get test instrumentation info
instrumentation_tag = [t for t in tags if t.startswith("instrumentation ")]
if not instrumentation_tag:
raise Err("Can not find instrumentation details in: %s", exe)
res.pkg_runner = re.search(r"^[ ]+A: android:name\(0x[0-9a-f]{8}\)=\"(?P<runner>.*?)\" \(Raw: \"(?P=runner)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("runner")
res.pkg_target = re.search(r"^[ ]+A: android:targetPackage\(0x[0-9a-f]{8}\)=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("pkg")
if not res.pkg_name or not res.pkg_runner or not res.pkg_target:
raise Err("Can not find instrumentation details in: %s", exe)
return res
class AndroidTestSuite(TestSuite):
def __init__(self, options, cache, id, android_env={}):
TestSuite.__init__(self, options, cache, id)
sdk_dir = options.android_sdk or os.environ.get("ANDROID_SDK", False) or os.path.dirname(os.path.dirname(self.cache.android_executable))
log.debug("Detecting Android tools in directory: %s", sdk_dir)
self.adb = Adb(sdk_dir)
self.aapt = Aapt(sdk_dir)
self.env = android_env
def isTest(self, fullpath):
if os.path.isfile(fullpath):
if fullpath.endswith(".apk") or os.access(fullpath, os.X_OK):
return True
return False
def getOS(self):
return self.adb.getOSIdentifier()
def checkPrerequisites(self):
self.adb.init(self.options.serial)
def runTest(self, module, path, logfile, workingDir, args=[]):
args = args[:]
exe = os.path.abspath(path)
if exe.endswith(".apk"):
info = self.aapt.dump(exe)
if not info:
raise Err("Can not read info from test package: %s", exe)
info.forcePackage(self.options.package)
self.adb.run(["uninstall", info.pkg_name])
output = self.adb.run(["install", exe], silent=True)
if not (output and "Success" in output):
raise Err("Can not install package: %s", exe)
params = ["-e package %s" % info.pkg_target]
ret = self.adb.run(["shell", "am instrument -w %s %s/%s" % (" ".join(params), info.pkg_name, info.pkg_runner)])
return None, ret
else:
device_dir = getpass.getuser().replace(" ", "") + "_" + self.options.mode + "/"
if isColorEnabled(args):
args.append("--gtest_color=yes")
tempdir = "/data/local/tmp/"
android_dir = tempdir + device_dir
exename = os.path.basename(exe)
android_exe = android_dir + exename
self.adb.run(["push", exe, android_exe])
self.adb.run(["shell", "chmod 777 " + android_exe])
env_pieces = ["export %s=%s" % (a, b) for a, b in self.env.items()]
pieces = ["cd %s" % android_dir, "./%s %s" % (exename, " ".join(args))]
log.warning("Run: %s" % " && ".join(pieces))
ret = self.adb.run(["shell", " && ".join(env_pieces + pieces)])
# try get log
hostlogpath = os.path.join(workingDir, logfile)
self.adb.run(["pull", android_dir + logfile, hostlogpath])
# cleanup
self.adb.run(["shell", "rm " + android_dir + logfile])
self.adb.run(["shell", "rm " + tempdir + "__opencv_temp.*"], silent=True)
if os.path.isfile(hostlogpath):
return hostlogpath, ret
return None, ret
if __name__ == "__main__":
log.error("This is utility file, please execute run.py script")

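detectSerial() scrapes the `adb devices` listing with a multiline regex; a standalone sketch with fabricated adb output:

import re
adb_res = 'List of devices attached\nemulator-5554\tdevice\n'
connected = re.findall(r'^[^\n]+[ \t]+device\r?$', adb_res, re.MULTILINE)
print(connected[0].split('\t')[0])  # emulator-5554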
modules/ts/misc/run_long.py Normal file

@@ -0,0 +1,119 @@
#!/usr/bin/env python
from __future__ import print_function
import xml.etree.ElementTree as ET
from glob import glob
from pprint import PrettyPrinter as PP
LONG_TESTS_DEBUG_VALGRIND = [
('calib3d', 'Calib3d_InitUndistortRectifyMap.accuracy', 2017.22),
('dnn', 'Reproducibility*', 1000), # large DNN models
('dnn', '*RCNN*', 1000), # very large DNN models
('dnn', '*RFCN*', 1000), # very large DNN models
('dnn', '*EAST*', 1000), # very large DNN models
('dnn', '*VGG16*', 1000), # very large DNN models
('dnn', '*ZFNet*', 1000), # very large DNN models
('dnn', '*ResNet101_DUC_HDC*', 1000), # very large DNN models
('dnn', '*LResNet100E_IR*', 1000), # very large DNN models
('dnn', '*read_yolo_voc_stream*', 1000), # very large DNN models
('dnn', '*eccv16*', 1000), # very large DNN models
('dnn', '*OpenPose*', 1000), # very large DNN models
('dnn', '*SSD/*', 1000), # very large DNN models
('gapi', 'Fluid.MemoryConsumptionDoesNotGrowOnReshape', 1000000), # test doesn't work properly under valgrind
('face', 'CV_Face_FacemarkLBF.test_workflow', 10000.0), # >40min on i7
('features2d', 'Features2d/DescriptorImage.no_crash/3', 1000),
('features2d', 'Features2d/DescriptorImage.no_crash/4', 1000),
('features2d', 'Features2d/DescriptorImage.no_crash/5', 1000),
('features2d', 'Features2d/DescriptorImage.no_crash/6', 1000),
('features2d', 'Features2d/DescriptorImage.no_crash/7', 1000),
('imgcodecs', 'Imgcodecs_Png.write_big', 1000), # memory limit
('imgcodecs', 'Imgcodecs_Tiff.decode_tile16384x16384', 1000), # memory limit
('ml', 'ML_RTrees.regression', 1423.47),
('optflow', 'DenseOpticalFlow_DeepFlow.ReferenceAccuracy', 1360.95),
('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/0', 1881.59),
('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/1', 5608.75),
('optflow', 'DenseOpticalFlow_GlobalPatchColliderDCT.ReferenceAccuracy', 5433.84),
('optflow', 'DenseOpticalFlow_GlobalPatchColliderWHT.ReferenceAccuracy', 5232.73),
('optflow', 'DenseOpticalFlow_SimpleFlow.ReferenceAccuracy', 1542.1),
('photo', 'Photo_Denoising.speed', 1484.87),
('photo', 'Photo_DenoisingColoredMulti.regression', 2447.11),
('rgbd', 'Rgbd_Normals.compute', 1156.32),
('shape', 'Hauss.regression', 2625.72),
('shape', 'ShapeEMD_SCD.regression', 61913.7),
('shape', 'Shape_SCD.regression', 3311.46),
('tracking', 'AUKF.br_mean_squared_error', 10764.6),
('tracking', 'UKF.br_mean_squared_error', 5228.27),
('tracking', '*DistanceAndOverlap*/1', 1000.0), # dudek
('tracking', '*DistanceAndOverlap*/2', 1000.0), # faceocc2
('videoio', 'videoio/videoio_ffmpeg.write_big*', 1000),
('videoio', 'videoio_ffmpeg.parallel', 1000),
('videoio', '*videocapture_acceleration*', 1000), # valgrind can't track HW buffers: Conditional jump or move depends on uninitialised value(s)
('videoio', '*videowriter_acceleration*', 1000), # valgrind crash: set_mempolicy: Operation not permitted
('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_BoostDesc_LBGM.regression', 1124.51),
('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG120.regression', 2198.1),
('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG48.regression', 1958.52),
('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG64.regression', 2113.12),
('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG80.regression', 2167.16),
('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_BoostDesc_LBGM.regression', 1511.39),
('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG120.regression', 1222.07),
('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG48.regression', 1059.14),
('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG64.regression', 1163.41),
('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG80.regression', 1179.06),
('ximgproc', 'L0SmoothTest.SplatSurfaceAccuracy', 6382.26),
('ximgproc', 'perf*/1*:perf*/2*:perf*/3*:perf*/4*:perf*/5*:perf*/6*:perf*/7*:perf*/8*:perf*/9*', 1000.0), # only first 10 parameters
('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/5', 1086.33),
('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/7', 1405.05),
('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/5', 1253.07),
('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/7', 1599.98),
('ximgproc', '*MultiThreadReproducibility*/1:*MultiThreadReproducibility*/2:*MultiThreadReproducibility*/3:*MultiThreadReproducibility*/4:*MultiThreadReproducibility*/5:*MultiThreadReproducibility*/6:*MultiThreadReproducibility*/7:*MultiThreadReproducibility*/8:*MultiThreadReproducibility*/9:*MultiThreadReproducibility*/1*', 1000.0),
('ximgproc', '*AdaptiveManifoldRefImplTest*/1:*AdaptiveManifoldRefImplTest*/2:*AdaptiveManifoldRefImplTest*/3', 1000.0),
('ximgproc', '*JointBilateralFilterTest_NaiveRef*', 1000.0),
('ximgproc', '*RollingGuidanceFilterTest_BilateralRef*/1*:*RollingGuidanceFilterTest_BilateralRef*/2*:*RollingGuidanceFilterTest_BilateralRef*/3*', 1000.0),
('ximgproc', '*JointBilateralFilterTest_NaiveRef*', 1000.0),
]
def longTestFilter(data, module=None):
res = ['*', '-'] + [v for m, v, _time in data if module is None or m == module]
return '--gtest_filter={}'.format(':'.join(res))
# Parse one xml file, filter out tests which took less than 'timeLimit' seconds
# Returns tuple: ( <module_name>, [ (<module_name>, <test_name>, <test_time>), ... ] )
def parseOneFile(filename, timeLimit):
tree = ET.parse(filename)
root = tree.getroot()
def guess(s, delims):
for delim in delims:
tmp = s.partition(delim)
if len(tmp[1]) != 0:
return tmp[0]
return None
module = guess(filename, ['_posix_', '_nt_', '__']) or root.get('cv_module_name')
if not module:
return (None, None)
res = []
for elem in root.findall('.//testcase'):
key = '{}.{}'.format(elem.get('classname'), elem.get('name'))
val = elem.get('time')
if float(val) >= timeLimit:
res.append((module, key, float(val)))
return (module, res)
# Parse all xml files in current folder and combine results into one list
# Print result to the stdout
if __name__ == '__main__':
LIMIT = 1000
res = []
xmls = glob('*.xml')
for xml in xmls:
print('Parsing file', xml, '...')
module, testinfo = parseOneFile(xml, LIMIT)
if not module:
print('SKIP')
continue
res.extend(testinfo)
print('========= RESULTS =========')
PP(indent=4, width=100).pprint(sorted(res))

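A worked example of the filter builder: for the 'ml' module the table above contains a single entry, so

print(longTestFilter(LONG_TESTS_DEBUG_VALGRIND, 'ml'))
# --gtest_filter=*:-:ML_RTrees.regression

gtest keeps every test matching '*' and excludes the listed long-running one; the empty pattern produced after '-' matches no test name, so it is harmless.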

@@ -0,0 +1,204 @@
#!/usr/bin/env python
import os
import re
import sys
from run_utils import Err, log, execute, getPlatformVersion, isColorEnabled, TempEnvDir
from run_long import LONG_TESTS_DEBUG_VALGRIND, longTestFilter
class TestSuite(object):
def __init__(self, options, cache, id):
self.options = options
self.cache = cache
self.nameprefix = "opencv_" + self.options.mode + "_"
self.tests = self.cache.gatherTests(self.nameprefix + "*", self.isTest)
self.id = id
def getOS(self):
return getPlatformVersion() or self.cache.getOS()
    # the base name (without extension) is reused by the tracing code in runTest()
    def getLogBaseName(self, app):
        return self.getAlias(app) + '_' + str(self.id)
    def getLogName(self, app):
        return self.getLogBaseName(app) + '.xml'
def listTests(self, short=False, main=False):
if len(self.tests) == 0:
raise Err("No tests found")
for t in self.tests:
if short:
t = self.getAlias(t)
if not main or self.cache.isMainModule(t):
log.info("%s", t)
def getAlias(self, fname):
return sorted(self.getAliases(fname), key=len)[0]
def getAliases(self, fname):
def getCuts(fname, prefix):
# filename w/o extension (opencv_test_core)
noext = re.sub(r"\.(exe|apk)$", '', fname)
# filename w/o prefix (core.exe)
nopref = fname
if fname.startswith(prefix):
nopref = fname[len(prefix):]
# filename w/o prefix and extension (core)
noprefext = noext
if noext.startswith(prefix):
noprefext = noext[len(prefix):]
return noext, nopref, noprefext
# input is full path ('/home/.../bin/opencv_test_core') or 'java'
res = [fname]
fname = os.path.basename(fname)
res.append(fname) # filename (opencv_test_core.exe)
for s in getCuts(fname, self.nameprefix):
res.append(s)
if self.cache.build_type == "Debug" and "Visual Studio" in self.cache.cmake_generator:
res.append(re.sub(r"d$", '', s)) # MSVC debug config, remove 'd' suffix
log.debug("Aliases: %s", set(res))
return set(res)
def getTest(self, name):
# return stored test name by provided alias
for t in self.tests:
if name in self.getAliases(t):
return t
raise Err("Can not find test: %s", name)
def getTestList(self, white, black):
res = [t for t in white or self.tests if self.getAlias(t) not in black]
if len(res) == 0:
raise Err("No tests found")
return set(res)
def isTest(self, fullpath):
if fullpath in ['java', 'python2', 'python3']:
return self.options.mode == 'test'
if not os.path.isfile(fullpath):
return False
if self.cache.getOS() == "nt" and not fullpath.endswith(".exe"):
return False
return os.access(fullpath, os.X_OK)
def wrapCommand(self, module, cmd, env):
if self.options.valgrind:
res = ['valgrind']
supp = self.options.valgrind_supp or []
for f in supp:
if os.path.isfile(f):
res.append("--suppressions=%s" % f)
else:
print("WARNING: Valgrind suppression file is missing, SKIP: %s" % f)
res.extend(self.options.valgrind_opt)
has_gtest_filter = next((True for x in cmd if x.startswith('--gtest_filter=')), False)
return res + cmd + ([longTestFilter(LONG_TESTS_DEBUG_VALGRIND, module)] if not has_gtest_filter else [])
elif self.options.qemu:
import shlex
res = shlex.split(self.options.qemu)
for (name, value) in [entry for entry in os.environ.items() if entry[0].startswith('OPENCV') and not entry[0] in env]:
res += ['-E', '"{}={}"'.format(name, value)]
for (name, value) in env.items():
res += ['-E', '"{}={}"'.format(name, value)]
return res + ['--'] + cmd
return cmd
def tryCommand(self, cmd, workingDir):
try:
if 0 == execute(cmd, cwd=workingDir):
return True
except:
pass
return False
def runTest(self, module, path, logfile, workingDir, args=[]):
args = args[:]
exe = os.path.abspath(path)
if module == "java":
cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type]
if self.options.package:
cmd += ["-Dopencv.test.package=%s" % self.options.package]
cmd += ["buildAndTest"]
ret = execute(cmd, cwd=self.cache.java_test_dir)
return None, ret
elif module in ['python2', 'python3']:
executable = os.getenv('OPENCV_PYTHON_BINARY', None)
if executable is None or module == 'python{}'.format(sys.version_info[0]):
executable = sys.executable
if executable is None:
executable = path
if not self.tryCommand([executable, '--version'], workingDir):
executable = 'python'
cmd = [executable, self.cache.opencv_home + '/modules/python/test/test.py', '--repo', self.cache.opencv_home, '-v'] + args
module_suffix = '' if 'Visual Studio' not in self.cache.cmake_generator else '/' + self.cache.build_type
env = {}
env['PYTHONPATH'] = self.cache.opencv_build + '/lib' + module_suffix + os.pathsep + os.getenv('PYTHONPATH', '')
if self.cache.getOS() == 'nt':
env['PATH'] = self.cache.opencv_build + '/bin' + module_suffix + os.pathsep + os.getenv('PATH', '')
else:
env['LD_LIBRARY_PATH'] = self.cache.opencv_build + '/bin' + os.pathsep + os.getenv('LD_LIBRARY_PATH', '')
ret = execute(cmd, cwd=workingDir, env=env)
return None, ret
else:
if isColorEnabled(args):
args.append("--gtest_color=yes")
env = {}
if not self.options.valgrind and self.options.trace:
env['OPENCV_TRACE'] = '1'
env['OPENCV_TRACE_LOCATION'] = 'OpenCVTrace-{}'.format(self.getLogBaseName(exe))
env['OPENCV_TRACE_SYNC_OPENCL'] = '1'
tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
tempDir.init()
cmd = self.wrapCommand(module, [exe] + args, env)
log.warning("Run: %s" % " ".join(cmd))
ret = execute(cmd, cwd=workingDir, env=env)
try:
if not self.options.valgrind and self.options.trace and int(self.options.trace_dump) >= 0:
import trace_profiler
trace = trace_profiler.Trace(env['OPENCV_TRACE_LOCATION']+'.txt')
trace.process()
trace.dump(max_entries=int(self.options.trace_dump))
except:
import traceback
traceback.print_exc()
pass
tempDir.clean()
hostlogpath = os.path.join(workingDir, logfile)
if os.path.isfile(hostlogpath):
return hostlogpath, ret
return None, ret
def runTests(self, tests, black, workingDir, args=[]):
args = args[:]
logs = []
test_list = self.getTestList(tests, black)
if len(test_list) != 1:
args = [a for a in args if not a.startswith("--gtest_output=")]
ret = 0
for test in test_list:
more_args = []
exe = self.getTest(test)
if exe in ["java", "python2", "python3"]:
logname = None
else:
userlog = [a for a in args if a.startswith("--gtest_output=")]
if len(userlog) == 0:
logname = self.getLogName(exe)
more_args.append("--gtest_output=xml:" + logname)
else:
logname = userlog[0][userlog[0].find(":")+1:]
log.debug("Running the test: %s (%s) ==> %s in %s", exe, args + more_args, logname, workingDir)
if self.options.dry_run:
logfile, r = None, 0
else:
logfile, r = self.runTest(test, exe, logname, workingDir, args + more_args)
log.debug("Test returned: %s ==> %s", r, logfile)
if r != 0:
ret = r
if logfile:
logs.append(os.path.relpath(logfile, workingDir))
return logs, ret
if __name__ == "__main__":
log.error("This is utility file, please execute run.py script")

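The alias machinery is what lets `run.py -t core` find an executable named opencv_perf_core. A standalone, simplified re-implementation (a hypothetical helper; the MSVC debug 'd'-suffix handling is omitted):

import os, re
def aliases(fullpath, prefix='opencv_perf_'):
    fname = os.path.basename(fullpath)
    noext = re.sub(r'\.(exe|apk)$', '', fname)
    res = {fullpath, fname, noext}
    for s in (fname, noext):
        if s.startswith(prefix):
            res.add(s[len(prefix):])
    return res

print(aliases('/build/bin/opencv_perf_core'))
# {'/build/bin/opencv_perf_core', 'opencv_perf_core', 'core'}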

@@ -0,0 +1,203 @@
#!/usr/bin/env python
import sys
import os
import platform
import re
import tempfile
import glob
import logging
import shutil
from subprocess import check_call, check_output, CalledProcessError, STDOUT
def initLogger():
logger = logging.getLogger("run.py")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stderr)
ch.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(ch)
return logger
log = initLogger()
hostos = os.name # 'nt', 'posix'
class Err(Exception):
def __init__(self, msg, *args):
self.msg = msg % args
def execute(cmd, silent=False, cwd=".", env=None):
try:
log.debug("Run: %s", cmd)
if env is not None:
for k in env:
log.debug(" Environ: %s=%s", k, env[k])
new_env = os.environ.copy()
new_env.update(env)
env = new_env
if sys.platform == 'darwin': # https://github.com/opencv/opencv/issues/14351
if env is None:
env = os.environ.copy()
if 'DYLD_LIBRARY_PATH' in env:
env['OPENCV_SAVED_DYLD_LIBRARY_PATH'] = env['DYLD_LIBRARY_PATH']
if silent:
return check_output(cmd, stderr=STDOUT, cwd=cwd, env=env).decode("latin-1")
else:
return check_call(cmd, cwd=cwd, env=env)
except CalledProcessError as e:
if silent:
log.debug("Process returned: %d", e.returncode)
return e.output.decode("latin-1")
else:
log.error("Process returned: %d", e.returncode)
return e.returncode
def isColorEnabled(args):
usercolor = [a for a in args if a.startswith("--gtest_color=")]
return len(usercolor) == 0 and sys.stdout.isatty() and hostos != "nt"
def getPlatformVersion():
mv = platform.mac_ver()
if mv[0]:
return "Darwin" + mv[0]
else:
wv = platform.win32_ver()
if wv[0]:
return "Windows" + wv[0]
else:
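            # note: platform.linux_distribution() was deprecated in Python 3.5
            # and removed in 3.8; this code targets older interpreters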
lv = platform.linux_distribution()
if lv[0]:
return lv[0] + lv[1]
return None
parse_patterns = (
{'name': "cmake_home", 'default': None, 'pattern': re.compile(r"^CMAKE_HOME_DIRECTORY:\w+=(.+)$")},
{'name': "opencv_home", 'default': None, 'pattern': re.compile(r"^OpenCV_SOURCE_DIR:\w+=(.+)$")},
{'name': "opencv_build", 'default': None, 'pattern': re.compile(r"^OpenCV_BINARY_DIR:\w+=(.+)$")},
{'name': "tests_dir", 'default': None, 'pattern': re.compile(r"^EXECUTABLE_OUTPUT_PATH:\w+=(.+)$")},
{'name': "build_type", 'default': "Release", 'pattern': re.compile(r"^CMAKE_BUILD_TYPE:\w+=(.*)$")},
{'name': "android_abi", 'default': None, 'pattern': re.compile(r"^ANDROID_ABI:\w+=(.*)$")},
{'name': "android_executable", 'default': None, 'pattern': re.compile(r"^ANDROID_EXECUTABLE:\w+=(.*android.*)$")},
{'name': "ant_executable", 'default': None, 'pattern': re.compile(r"^ANT_EXECUTABLE:\w+=(.*ant.*)$")},
{'name': "java_test_dir", 'default': None, 'pattern': re.compile(r"^OPENCV_JAVA_TEST_DIR:\w+=(.*)$")},
{'name': "is_x64", 'default': "OFF", 'pattern': re.compile(r"^CUDA_64_BIT_DEVICE_CODE:\w+=(ON)$")},
{'name': "cmake_generator", 'default': None, 'pattern': re.compile(r"^CMAKE_GENERATOR:\w+=(.+)$")},
{'name': "python2", 'default': None, 'pattern': re.compile(r"^BUILD_opencv_python2:\w+=(.*)$")},
{'name': "python3", 'default': None, 'pattern': re.compile(r"^BUILD_opencv_python3:\w+=(.*)$")},
)
class CMakeCache:
def __init__(self, cfg=None):
self.setDefaultAttrs()
self.main_modules = []
if cfg:
self.build_type = cfg
def setDummy(self, path):
self.tests_dir = os.path.normpath(path)
def read(self, path, fname):
rx = re.compile(r'^OPENCV_MODULE_opencv_(\w+)_LOCATION:INTERNAL=(.*)$')
module_paths = {} # name -> path
with open(fname, "rt") as cachefile:
for l in cachefile.readlines():
ll = l.strip()
if not ll or ll.startswith("#"):
continue
for p in parse_patterns:
match = p["pattern"].match(ll)
if match:
value = match.groups()[0]
if value and not value.endswith("-NOTFOUND"):
setattr(self, p["name"], value)
# log.debug("cache value: %s = %s", p["name"], value)
match = rx.search(ll)
if match:
module_paths[match.group(1)] = match.group(2)
if not self.tests_dir:
self.tests_dir = path
else:
rel = os.path.relpath(self.tests_dir, self.opencv_build)
self.tests_dir = os.path.join(path, rel)
self.tests_dir = os.path.normpath(self.tests_dir)
# fix VS test binary path (add Debug or Release)
if "Visual Studio" in self.cmake_generator:
self.tests_dir = os.path.join(self.tests_dir, self.build_type)
for module, path in module_paths.items():
rel = os.path.relpath(path, self.opencv_home)
if ".." not in rel:
self.main_modules.append(module)
def setDefaultAttrs(self):
for p in parse_patterns:
setattr(self, p["name"], p["default"])
def gatherTests(self, mask, isGood=None):
if self.tests_dir and os.path.isdir(self.tests_dir):
d = os.path.abspath(self.tests_dir)
files = glob.glob(os.path.join(d, mask))
if not self.getOS() == "android" and self.withJava():
files.append("java")
if self.withPython2():
files.append("python2")
if self.withPython3():
files.append("python3")
return [f for f in files if isGood(f)]
return []
def isMainModule(self, name):
return name in self.main_modules + ['python2', 'python3']
def withJava(self):
return self.ant_executable and self.java_test_dir and os.path.exists(self.java_test_dir)
def withPython2(self):
return self.python2 == 'ON'
def withPython3(self):
return self.python3 == 'ON'
def getOS(self):
if self.android_executable:
return "android"
else:
return hostos
class TempEnvDir:
def __init__(self, envname, prefix):
self.envname = envname
self.prefix = prefix
self.saved_name = None
self.new_name = None
def init(self):
self.saved_name = os.environ.get(self.envname)
self.new_name = tempfile.mkdtemp(prefix=self.prefix, dir=self.saved_name or None)
os.environ[self.envname] = self.new_name
def clean(self):
if self.saved_name:
os.environ[self.envname] = self.saved_name
else:
del os.environ[self.envname]
try:
shutil.rmtree(self.new_name)
except:
pass
if __name__ == "__main__":
log.error("This is utility file, please execute run.py script")

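Each CMakeCache.txt line is matched against the regexes in parse_patterns; for instance, the build_type pattern in isolation:

import re
pattern = re.compile(r'^CMAKE_BUILD_TYPE:\w+=(.*)$')
m = pattern.match('CMAKE_BUILD_TYPE:STRING=Release')
print(m.group(1))  # Release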
modules/ts/misc/summary.py Normal file

@@ -0,0 +1,296 @@
#!/usr/bin/env python
import testlog_parser, sys, os, xml, glob, re
from table_formatter import *
from optparse import OptionParser
numeric_re = re.compile(r"(\d+)")
cvtype_re = re.compile(r"(8U|8S|16U|16S|32S|32F|64F)C(\d{1,3})")
cvtypes = { '8U': 0, '8S': 1, '16U': 2, '16S': 3, '32S': 4, '32F': 5, '64F': 6 }
convert = lambda text: int(text) if text.isdigit() else text
keyselector = lambda a: cvtype_re.sub(lambda match: " " + str(cvtypes.get(match.group(1), 7) + (int(match.group(2))-1) * 8) + " ", a)
alphanum_keyselector = lambda key: [ convert(c) for c in numeric_re.split(keyselector(key)) ]
def getSetName(tset, idx, columns, short = True):
if columns and len(columns) > idx:
prefix = columns[idx]
else:
prefix = None
if short and prefix:
return prefix
name = tset[0].replace(".xml","").replace("_", "\n")
if prefix:
return prefix + "\n" + ("-"*int(len(max(prefix.split("\n"), key=len))*1.5)) + "\n" + name
return name
if __name__ == "__main__":
if len(sys.argv) < 2:
print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml [<log_name2>.xml ...]"
exit(0)
parser = OptionParser()
parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html', 'markdown', 'tabs' or 'auto' - default)", metavar="FMT", default="auto")
parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
parser.add_option("", "--module", dest="module", default=None, metavar="NAME", help="module prefix for test names")
parser.add_option("", "--columns", dest="columns", default=None, metavar="NAMES", help="comma-separated list of column aliases")
parser.add_option("", "--no-relatives", action="store_false", dest="calc_relatives", default=True, help="do not output relative values")
parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="output cycle reduction percentages")
parser.add_option("", "--with-score", action="store_true", dest="calc_score", default=False, help="output automatic classification of speedups")
parser.add_option("", "--progress", action="store_true", dest="progress_mode", default=False, help="enable progress mode")
parser.add_option("", "--regressions", dest="regressions", default=None, metavar="LIST", help="comma-separated custom regressions map: \"[r][c]#current-#reference\" (indexes of columns are 0-based, \"r\" - reverse flag, \"c\" - color flag for base data)")
parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
parser.add_option("", "--match", dest="match", default=None)
parser.add_option("", "--match-replace", dest="match_replace", default="")
parser.add_option("", "--regressions-only", dest="regressionsOnly", default=None, metavar="X-FACTOR", help="show only tests with performance regressions not")
parser.add_option("", "--intersect-logs", dest="intersect_logs", default=False, help="show only tests present in all log files")
parser.add_option("", "--show_units", action="store_true", dest="show_units", help="append units into table cells")
(options, args) = parser.parse_args()
options.generateHtml = detectHtmlOutputType(options.format)
if options.metric not in metrix_table:
options.metric = "gmean"
if options.metric.endswith("%") or options.metric.endswith("$"):
options.calc_relatives = False
options.calc_cr = False
if options.columns:
options.columns = [s.strip().replace("\\n", "\n") for s in options.columns.split(",")]
if options.regressions:
assert not options.progress_mode, 'unsupported mode'
def parseRegressionColumn(s):
""" Format: '[r][c]<uint>-<uint>' """
reverse = s.startswith('r')
if reverse:
s = s[1:]
addColor = s.startswith('c')
if addColor:
s = s[1:]
parts = s.split('-', 1)
link = (int(parts[0]), int(parts[1]), reverse, addColor)
assert link[0] != link[1]
return link
options.regressions = [parseRegressionColumn(s) for s in options.regressions.split(',')]
show_units = options.units if options.show_units else None
# expand wildcards and filter duplicates
files = []
seen = set()
for arg in args:
if ("*" in arg) or ("?" in arg):
flist = [os.path.abspath(f) for f in glob.glob(arg)]
flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
files.extend([ x for x in flist if x not in seen and not seen.add(x)])
else:
fname = os.path.abspath(arg)
if fname not in seen and not seen.add(fname):
files.append(fname)
# read all passed files
test_sets = []
for arg in files:
try:
tests = testlog_parser.parseLogFile(arg)
if options.filter:
expr = re.compile(options.filter)
tests = [t for t in tests if expr.search(str(t))]
if options.match:
tests = [t for t in tests if t.get("status") != "notrun"]
if tests:
test_sets.append((os.path.basename(arg), tests))
except IOError as err:
sys.stderr.write("IOError reading \"" + arg + "\" - " + str(err) + os.linesep)
except xml.parsers.expat.ExpatError as err:
sys.stderr.write("ExpatError reading \"" + arg + "\" - " + str(err) + os.linesep)
if not test_sets:
sys.stderr.write("Error: no test data found" + os.linesep)
quit()
setsCount = len(test_sets)
if options.regressions is None:
reference = -1 if options.progress_mode else 0
options.regressions = [(i, reference, False, True) for i in range(1, len(test_sets))]
for link in options.regressions:
(i, ref, reverse, addColor) = link
assert i >= 0 and i < setsCount
assert ref < setsCount
# find matches
test_cases = {}
name_extractor = lambda name: str(name)
if options.match:
reg = re.compile(options.match)
name_extractor = lambda name: reg.sub(options.match_replace, str(name))
for i in range(setsCount):
for case in test_sets[i][1]:
name = name_extractor(case)
if options.module:
name = options.module + "::" + name
if name not in test_cases:
test_cases[name] = [None] * setsCount
test_cases[name][i] = case
# build table
getter = metrix_table[options.metric][1]
getter_score = metrix_table["score"][1] if options.calc_score else None
getter_p = metrix_table[options.metric + "%"][1] if options.calc_relatives else None
getter_cr = metrix_table[options.metric + "$"][1] if options.calc_cr else None
tbl = table('%s (%s)' % (metrix_table[options.metric][0], options.units), options.format)
# header
tbl.newColumn("name", "Name of Test", align = "left", cssclass = "col_name")
for i in range(setsCount):
tbl.newColumn(str(i), getSetName(test_sets[i], i, options.columns, False), align = "center")
def addHeaderColumns(suffix, description, cssclass):
for link in options.regressions:
(i, ref, reverse, addColor) = link
if reverse:
i, ref = ref, i
current_set = test_sets[i]
current = getSetName(current_set, i, options.columns)
if ref >= 0:
reference_set = test_sets[ref]
reference = getSetName(reference_set, ref, options.columns)
else:
reference = 'previous'
tbl.newColumn(str(i) + '-' + str(ref) + suffix, '%s\nvs\n%s\n(%s)' % (current, reference, description), align='center', cssclass=cssclass)
if options.calc_cr:
addHeaderColumns(suffix='$', description='cycles reduction', cssclass='col_cr')
if options.calc_relatives:
addHeaderColumns(suffix='%', description='x-factor', cssclass='col_rel')
if options.calc_score:
addHeaderColumns(suffix='S', description='score', cssclass='col_name')
# rows
prevGroupName = None
needNewRow = True
lastRow = None
for name in sorted(test_cases.keys(), key=alphanum_keyselector):
cases = test_cases[name]
if needNewRow:
lastRow = tbl.newRow()
if not options.showall:
needNewRow = False
tbl.newCell("name", name)
groupName = next(c for c in cases if c).shortName()
if groupName != prevGroupName:
prop = lastRow.props.get("cssclass", "")
if "firstingroup" not in prop:
lastRow.props["cssclass"] = prop + " firstingroup"
prevGroupName = groupName
for i in range(setsCount):
case = cases[i]
if case is None:
if options.intersect_logs:
needNewRow = False
break
tbl.newCell(str(i), "-")
else:
status = case.get("status")
if status != "run":
tbl.newCell(str(i), status, color="red")
else:
val = getter(case, cases[0], options.units)
if val:
needNewRow = True
tbl.newCell(str(i), formatValue(val, options.metric, show_units), val)
if needNewRow:
for link in options.regressions:
(i, reference, reverse, addColor) = link
if reverse:
i, reference = reference, i
tblCellID = str(i) + '-' + str(reference)
case = cases[i]
if case is None:
if options.calc_relatives:
tbl.newCell(tblCellID + "%", "-")
if options.calc_cr:
tbl.newCell(tblCellID + "$", "-")
if options.calc_score:
tbl.newCell(tblCellID + "$", "-")
else:
status = case.get("status")
if status != "run":
tbl.newCell(str(i), status, color="red")
if status != "notrun":
needNewRow = True
if options.calc_relatives:
tbl.newCell(tblCellID + "%", "-", color="red")
if options.calc_cr:
tbl.newCell(tblCellID + "$", "-", color="red")
if options.calc_score:
tbl.newCell(tblCellID + "S", "-", color="red")
else:
val = getter(case, cases[0], options.units)
def getRegression(fn):
if fn and val:
for j in reversed(range(i)) if reference < 0 else [reference]:
r = cases[j]
if r is not None and r.get("status") == 'run':
return fn(case, r, options.units)
valp = getRegression(getter_p) if options.calc_relatives or options.progress_mode else None
valcr = getRegression(getter_cr) if options.calc_cr else None
val_score = getRegression(getter_score) if options.calc_score else None
if not valp:
color = None
elif valp > 1.05:
color = 'green'
elif valp < 0.95:
color = 'red'
else:
color = None
if addColor:
if not reverse:
tbl.newCell(str(i), formatValue(val, options.metric, show_units), val, color=color)
else:
r = cases[reference]
if r is not None and r.get("status") == 'run':
val = getter(r, cases[0], options.units)
tbl.newCell(str(reference), formatValue(val, options.metric, show_units), val, color=color)
if options.calc_relatives:
tbl.newCell(tblCellID + "%", formatValue(valp, "%"), valp, color=color, bold=color)
if options.calc_cr:
tbl.newCell(tblCellID + "$", formatValue(valcr, "$"), valcr, color=color, bold=color)
if options.calc_score:
tbl.newCell(tblCellID + "S", formatValue(val_score, "S"), val_score, color = color, bold = color)
if not needNewRow:
tbl.trimLastRow()
if options.regressionsOnly:
for r in reversed(range(len(tbl.rows))):
for i in range(1, len(options.regressions) + 1):
val = tbl.rows[r].cells[len(tbl.rows[r].cells) - i].value
if val is not None and val < float(options.regressionsOnly):
break
else:
tbl.rows.pop(r)
# output table
if options.generateHtml:
if options.format == "moinwiki":
tbl.htmlPrintTable(sys.stdout, True)
else:
htmlPrintHeader(sys.stdout, "Summary report for %s tests from %s test logs" % (len(test_cases), setsCount))
tbl.htmlPrintTable(sys.stdout)
htmlPrintFooter(sys.stdout)
else:
tbl.consolePrintTable(sys.stdout)
if options.regressionsOnly:
sys.exit(len(tbl.rows))

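The natural-order sort key first folds OpenCV type names into single numbers (type index plus a channel offset), then splits digit runs so numeric segments compare numerically; a worked example with a made-up test name:

print(keyselector('resize_640x480_32FC3'))
# 'resize_640x480_ 21 '   (32F -> 5, plus (3-1)*8 for the channels)
print(alphanum_keyselector('resize_640x480_32FC3'))
# ['resize_', 640, 'x', 480, '_ ', 21, ' ']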

@@ -0,0 +1,814 @@
#!/usr/bin/env python
from __future__ import print_function
import sys, re, os.path, cgi, stat, math
from optparse import OptionParser
from color import getColorizer, dummyColorizer
class tblCell(object):
def __init__(self, text, value = None, props = None):
self.text = text
self.value = value
self.props = props
class tblColumn(object):
def __init__(self, caption, title = None, props = None):
self.text = caption
self.title = title
self.props = props
class tblRow(object):
def __init__(self, colsNum, props = None):
self.cells = [None] * colsNum
self.props = props
def htmlEncode(lines):
    # expects a sequence of text lines; escapes each and joins with <br/>
    return '<br/>'.join([cgi.escape(s) for s in lines])
class table(object):
def_align = "left"
def_valign = "middle"
def_color = None
def_colspan = 1
def_rowspan = 1
def_bold = False
def_italic = False
def_text="-"
def __init__(self, caption = None, format=None):
self.format = format
self.is_markdown = self.format == 'markdown'
self.is_tabs = self.format == 'tabs'
self.columns = {}
self.rows = []
        self.ridx = -1
        self.caption = caption
def newRow(self, **properties):
if len(self.rows) - 1 == self.ridx:
self.rows.append(tblRow(len(self.columns), properties))
else:
self.rows[self.ridx + 1].props = properties
self.ridx += 1
return self.rows[self.ridx]
def trimLastRow(self):
if self.rows:
self.rows.pop()
if self.ridx >= len(self.rows):
self.ridx = len(self.rows) - 1
def newColumn(self, name, caption, title = None, **properties):
if name in self.columns:
index = self.columns[name].index
else:
index = len(self.columns)
if isinstance(caption, tblColumn):
caption.index = index
self.columns[name] = caption
return caption
else:
col = tblColumn(caption, title, properties)
col.index = index
self.columns[name] = col
return col
def getColumn(self, name):
if isinstance(name, str):
return self.columns.get(name, None)
else:
vals = [v for v in self.columns.values() if v.index == name]
if vals:
return vals[0]
return None
def newCell(self, col_name, text, value = None, **properties):
if self.ridx < 0:
self.newRow()
col = self.getColumn(col_name)
row = self.rows[self.ridx]
if not col:
return None
if isinstance(text, tblCell):
cl = text
else:
cl = tblCell(text, value, properties)
row.cells[col.index] = cl
return cl
def layoutTable(self):
columns = self.columns.values()
columns = sorted(columns, key=lambda c: c.index)
colspanned = []
rowspanned = []
self.headerHeight = 1
rowsToAppend = 0
for col in columns:
self.measureCell(col)
if col.height > self.headerHeight:
self.headerHeight = col.height
col.minwidth = col.width
col.line = None
for r in range(len(self.rows)):
row = self.rows[r]
row.minheight = 1
for i in range(len(row.cells)):
cell = row.cells[i]
if row.cells[i] is None:
continue
cell.line = None
self.measureCell(cell)
colspan = int(self.getValue("colspan", cell))
rowspan = int(self.getValue("rowspan", cell))
if colspan > 1:
colspanned.append((r,i))
if i + colspan > len(columns):
colspan = len(columns) - i
cell.colspan = colspan
#clear spanned cells
for j in range(i+1, min(len(row.cells), i + colspan)):
row.cells[j] = None
elif columns[i].minwidth < cell.width:
columns[i].minwidth = cell.width
if rowspan > 1:
rowspanned.append((r,i))
                    rowsToAppend2 = r + rowspan - len(self.rows)
if rowsToAppend2 > rowsToAppend:
rowsToAppend = rowsToAppend2
cell.rowspan = rowspan
#clear spanned cells
for j in range(r+1, min(len(self.rows), r + rowspan)):
if len(self.rows[j].cells) > i:
self.rows[j].cells[i] = None
elif row.minheight < cell.height:
row.minheight = cell.height
self.ridx = len(self.rows) - 1
for r in range(rowsToAppend):
self.newRow()
self.rows[len(self.rows) - 1].minheight = 1
while colspanned:
colspanned_new = []
for r, c in colspanned:
cell = self.rows[r].cells[c]
                cell.awailable = sum([col.minwidth for col in columns[c:c + cell.colspan]]) + cell.colspan - 1
if cell.awailable < cell.width:
colspanned_new.append((r,c))
colspanned = colspanned_new
if colspanned:
r,c = colspanned[0]
cell = self.rows[r].cells[c]
cols = columns[c:c + cell.colspan]
total = cell.awailable - cell.colspan + 1
budget = cell.width - cell.awailable
spent = 0
s = 0
for col in cols:
s += col.minwidth
addition = s * budget / total - spent
spent += addition
col.minwidth += addition
while rowspanned:
rowspanned_new = []
for r, c in rowspanned:
cell = self.rows[r].cells[c]
cell.awailable = sum([row.minheight for row in self.rows[r:r + cell.rowspan]])
if cell.awailable < cell.height:
rowspanned_new.append((r,c))
rowspanned = rowspanned_new
if rowspanned:
r,c = rowspanned[0]
cell = self.rows[r].cells[c]
rows = self.rows[r:r + cell.rowspan]
total = cell.awailable
budget = cell.height - cell.awailable
spent = 0
s = 0
for row in rows:
s += row.minheight
addition = s * budget / total - spent
spent += addition
row.minheight += addition
return columns
def measureCell(self, cell):
text = self.getValue("text", cell)
cell.text = self.reformatTextValue(text)
cell.height = len(cell.text)
cell.width = len(max(cell.text, key = lambda line: len(line)))
def reformatTextValue(self, value):
        if sys.version_info >= (3,0):
            unicode = str
if isinstance(value, str):
vstr = value
elif isinstance(value, unicode):
vstr = str(value)
else:
try:
vstr = '\n'.join([str(v) for v in value])
except TypeError:
vstr = str(value)
return vstr.splitlines()
    def adjustColWidth(self, cols, width):
        total = sum([c.minwidth for c in cols])
        if total + len(cols) - 1 >= width:
            return
        budget = width - len(cols) + 1 - total
        spent = 0
        s = 0
        for col in cols:
            s += col.minwidth
            addition = s * budget / total - spent
            spent += addition
            col.minwidth += addition
def getValue(self, name, *elements):
for el in elements:
try:
return getattr(el, name)
except AttributeError:
pass
try:
val = el.props[name]
if val:
return val
except AttributeError:
pass
except KeyError:
pass
try:
return getattr(self.__class__, "def_" + name)
except AttributeError:
return None
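    # property lookup order used throughout the renderer: for
    # getValue("align", cell, row, column) the first hit wins, checking the
    # cell attribute, then cell.props, row attribute, row.props, column
    # attribute, column.props, and finally the class default def_align ("left")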
def consolePrintTable(self, out):
columns = self.layoutTable()
colrizer = getColorizer(out) if not (self.is_markdown or self.is_tabs) else dummyColorizer(out)
if self.caption:
out.write("%s%s%s" % ( os.linesep, os.linesep.join(self.reformatTextValue(self.caption)), os.linesep * 2))
headerRow = tblRow(len(columns), {"align": "center", "valign": "top", "bold": True, "header": True})
headerRow.cells = columns
headerRow.minheight = self.headerHeight
self.consolePrintRow2(colrizer, headerRow, columns)
for i in range(0, len(self.rows)):
self.consolePrintRow2(colrizer, i, columns)
def consolePrintRow2(self, out, r, columns):
if isinstance(r, tblRow):
row = r
r = -1
else:
row = self.rows[r]
#evaluate initial values for line numbers
i = 0
while i < len(row.cells):
cell = row.cells[i]
colspan = self.getValue("colspan", cell)
if cell is not None:
cell.wspace = sum([col.minwidth for col in columns[i:i + colspan]]) + colspan - 1
if cell.line is None:
if r < 0:
rows = [row]
else:
rows = self.rows[r:r + self.getValue("rowspan", cell)]
cell.line = self.evalLine(cell, rows, columns[i])
if len(rows) > 1:
for rw in rows:
rw.cells[i] = cell
i += colspan
#print content
if self.is_markdown:
out.write("|")
for c in row.cells:
text = ' '.join(self.getValue('text', c) or [])
out.write(text + "|")
out.write(os.linesep)
elif self.is_tabs:
cols_to_join=[' '.join(self.getValue('text', c) or []) for c in row.cells]
out.write('\t'.join(cols_to_join))
out.write(os.linesep)
else:
for ln in range(row.minheight):
i = 0
while i < len(row.cells):
if i > 0:
out.write(" ")
cell = row.cells[i]
column = columns[i]
if cell is None:
out.write(" " * column.minwidth)
i += 1
else:
self.consolePrintLine(cell, row, column, out)
i += self.getValue("colspan", cell)
if self.is_markdown:
out.write("|")
out.write(os.linesep)
if self.is_markdown and row.props.get('header', False):
out.write("|")
for th in row.cells:
align = self.getValue("align", th)
if align == 'center':
out.write(":-:|")
elif align == 'right':
out.write("--:|")
else:
out.write("---|")
out.write(os.linesep)
def consolePrintLine(self, cell, row, column, out):
if cell.line < 0 or cell.line >= cell.height:
line = ""
else:
line = cell.text[cell.line]
width = cell.wspace
align = self.getValue("align", ((None, cell)[isinstance(cell, tblCell)]), row, column)
if align == "right":
pattern = "%" + str(width) + "s"
elif align == "center":
pattern = "%" + str((width - len(line)) // 2 + len(line)) + "s" + " " * (width - len(line) - (width - len(line)) // 2)
else:
pattern = "%-" + str(width) + "s"
out.write(pattern % line, color = self.getValue("color", cell, row, column))
cell.line += 1
def evalLine(self, cell, rows, column):
height = cell.height
valign = self.getValue("valign", cell, rows[0], column)
space = sum([row.minheight for row in rows])
if valign == "bottom":
return height - space
if valign == "middle":
return (height - space + 1) // 2
return 0
def htmlPrintTable(self, out, embeedcss = False):
columns = self.layoutTable()
if embeedcss:
out.write("<div style=\"font-family: Lucida Console, Courier New, Courier;font-size: 16px;color:#3e4758;\">\n<table style=\"background:none repeat scroll 0 0 #FFFFFF;border-collapse:collapse;font-family:'Lucida Sans Unicode','Lucida Grande',Sans-Serif;font-size:14px;margin:20px;text-align:left;width:480px;margin-left: auto;margin-right: auto;white-space:nowrap;\">\n")
else:
out.write("<div class=\"tableFormatter\">\n<table class=\"tbl\">\n")
if self.caption:
if embeedcss:
out.write(" <caption style=\"font:italic 16px 'Trebuchet MS',Verdana,Arial,Helvetica,sans-serif;padding:0 0 5px;text-align:right;white-space:normal;\">%s</caption>\n" % htmlEncode(self.reformatTextValue(self.caption)))
else:
out.write(" <caption>%s</caption>\n" % htmlEncode(self.reformatTextValue(self.caption)))
out.write(" <thead>\n")
headerRow = tblRow(len(columns), {"align": "center", "valign": "top", "bold": True, "header": True})
headerRow.cells = columns
header_rows = [headerRow]
        header_rows.extend([row for row in self.rows if self.getValue("header", row)])
last_row = header_rows[len(header_rows) - 1]
for row in header_rows:
out.write(" <tr>\n")
for th in row.cells:
align = self.getValue("align", ((None, th)[isinstance(th, tblCell)]), row, row)
valign = self.getValue("valign", th, row)
cssclass = self.getValue("cssclass", th)
attr = ""
if align:
attr += " align=\"%s\"" % align
if valign:
attr += " valign=\"%s\"" % valign
if cssclass:
attr += " class=\"%s\"" % cssclass
css = ""
if embeedcss:
css = " style=\"border:none;color:#003399;font-size:16px;font-weight:normal;white-space:nowrap;padding:3px 10px;\""
if row == last_row:
css = css[:-1] + "padding-bottom:5px;\""
out.write(" <th%s%s>\n" % (attr, css))
if th is not None:
out.write(" %s\n" % htmlEncode(th.text))
out.write(" </th>\n")
out.write(" </tr>\n")
out.write(" </thead>\n <tbody>\n")
        rows = [row for row in self.rows if not self.getValue("header", row)]
for r in range(len(rows)):
row = rows[r]
rowattr = ""
cssclass = self.getValue("cssclass", row)
if cssclass:
rowattr += " class=\"%s\"" % cssclass
out.write(" <tr%s>\n" % (rowattr))
i = 0
while i < len(row.cells):
column = columns[i]
td = row.cells[i]
if isinstance(td, int):
i += td
continue
colspan = self.getValue("colspan", td)
rowspan = self.getValue("rowspan", td)
align = self.getValue("align", td, row, column)
valign = self.getValue("valign", td, row, column)
color = self.getValue("color", td, row, column)
bold = self.getValue("bold", td, row, column)
italic = self.getValue("italic", td, row, column)
style = ""
attr = ""
if color:
style += "color:%s;" % color
if bold:
style += "font-weight: bold;"
if italic:
style += "font-style: italic;"
if align and align != "left":
attr += " align=\"%s\"" % align
if valign and valign != "middle":
attr += " valign=\"%s\"" % valign
if colspan > 1:
attr += " colspan=\"%s\"" % colspan
if rowspan > 1:
attr += " rowspan=\"%s\"" % rowspan
for q in range(r+1, min(r+rowspan, len(rows))):
rows[q].cells[i] = colspan
if style:
attr += " style=\"%s\"" % style
css = ""
if embeedcss:
css = " style=\"border:none;border-bottom:1px solid #CCCCCC;color:#666699;padding:6px 8px;white-space:nowrap;\""
if r == 0:
css = css[:-1] + "border-top:2px solid #6678B1;\""
out.write(" <td%s%s>\n" % (attr, css))
if td is not None:
out.write(" %s\n" % htmlEncode(td.text))
out.write(" </td>\n")
i += colspan
out.write(" </tr>\n")
out.write(" </tbody>\n</table>\n</div>\n")
def htmlPrintHeader(out, title = None):
if title:
titletag = "<title>%s</title>\n" % htmlEncode([str(title)])
else:
titletag = ""
out.write("""<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=us-ascii">
%s<style type="text/css">
html, body {font-family: Lucida Console, Courier New, Courier;font-size: 16px;color:#3e4758;}
.tbl{background:none repeat scroll 0 0 #FFFFFF;border-collapse:collapse;font-family:"Lucida Sans Unicode","Lucida Grande",Sans-Serif;font-size:14px;margin:20px;text-align:left;width:480px;margin-left: auto;margin-right: auto;white-space:nowrap;}
.tbl span{display:block;white-space:nowrap;}
.tbl thead tr:last-child th {padding-bottom:5px;}
.tbl tbody tr:first-child td {border-top:3px solid #6678B1;}
.tbl th{border:none;color:#003399;font-size:16px;font-weight:normal;white-space:nowrap;padding:3px 10px;}
.tbl td{border:none;border-bottom:1px solid #CCCCCC;color:#666699;padding:6px 8px;white-space:nowrap;}
.tbl tbody tr:hover td{color:#000099;}
.tbl caption{font:italic 16px "Trebuchet MS",Verdana,Arial,Helvetica,sans-serif;padding:0 0 5px;text-align:right;white-space:normal;}
.firstingroup {border-top:2px solid #6678B1;}
</style>
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script>
<script type="text/javascript">
function abs(val) { return val < 0 ? -val : val }
$(function(){
//generate filter rows
$("div.tableFormatter table.tbl").each(function(tblIdx, tbl) {
var head = $("thead", tbl)
var filters = $("<tr></tr>")
var hasAny = false
$("tr:first th", head).each(function(colIdx, col) {
col = $(col)
var cell
var id = "t" + tblIdx + "r" + colIdx
if (col.hasClass("col_name")){
cell = $("<th><input id='" + id + "' name='" + id + "' type='text' style='width:100%%' class='filter_col_name' title='Regular expression for name filtering (&quot;resize.*640x480&quot; - resize tests on VGA resolution)'></input></th>")
hasAny = true
}
else if (col.hasClass("col_rel")){
cell = $("<th><input id='" + id + "' name='" + id + "' type='text' style='width:100%%' class='filter_col_rel' title='Filter out lines with a x-factor of acceleration less than Nx'></input></th>")
hasAny = true
}
else if (col.hasClass("col_cr")){
cell = $("<th><input id='" + id + "' name='" + id + "' type='text' style='width:100%%' class='filter_col_cr' title='Filter out lines with a percentage of acceleration less than N%%'></input></th>")
hasAny = true
}
else
cell = $("<th></th>")
cell.appendTo(filters)
})
if (hasAny){
$(tbl).wrap("<form id='form_t" + tblIdx + "' method='get' action=''></form>")
$("<input it='test' type='submit' value='Apply Filters' style='margin-left:10px;'></input>")
.appendTo($("th:last", filters.appendTo(head)))
}
})
//get filter values
var vars = []
var hashes = window.location.href.slice(window.location.href.indexOf('?') + 1).split('&')
for(var i = 0; i < hashes.length; ++i)
{
hash = hashes[i].split('=')
vars.push(decodeURIComponent(hash[0]))
vars[decodeURIComponent(hash[0])] = decodeURIComponent(hash[1]);
}
//set filter values
for(var i = 0; i < vars.length; ++i)
$("#" + vars[i]).val(vars[vars[i]])
//apply filters
$("div.tableFormatter table.tbl").each(function(tblIdx, tbl) {
filters = $("input:text", tbl)
var predicate = function(row) {return true;}
var empty = true
$.each($("input:text", tbl), function(i, flt) {
flt = $(flt)
var val = flt.val()
var pred = predicate;
if(val) {
empty = false
var colIdx = parseInt(flt.attr("id").slice(flt.attr("id").indexOf('r') + 1))
if(flt.hasClass("filter_col_name")) {
var re = new RegExp(val);
predicate = function(row) {
if (re.exec($(row.get(colIdx)).text()) == null)
return false
return pred(row)
}
} else if(flt.hasClass("filter_col_rel")) {
var percent = parseFloat(val)
if (percent < 0) {
predicate = function(row) {
var val = parseFloat($(row.get(colIdx)).text())
if (!val || val >= 1 || val > 1+percent)
return false
return pred(row)
}
} else {
predicate = function(row) {
var val = parseFloat($(row.get(colIdx)).text())
if (!val || val < percent)
return false
return pred(row)
}
}
} else if(flt.hasClass("filter_col_cr")) {
var percent = parseFloat(val)
predicate = function(row) {
var val = parseFloat($(row.get(colIdx)).text())
if (!val || val < percent)
return false
return pred(row)
}
}
}
});
if (!empty){
$("tbody tr", tbl).each(function (i, tbl_row) {
if(!predicate($("td", tbl_row)))
$(tbl_row).remove()
})
if($("tbody tr", tbl).length == 0) {
$("<tr><td colspan='"+$("thead tr:first th", tbl).length+"'>No results matching your search criteria</td></tr>")
.appendTo($("tbody", tbl))
}
}
})
})
</script>
</head>
<body>
""" % titletag)
def htmlPrintFooter(out):
out.write("</body>\n</html>")
def getStdoutFilename():
try:
if os.name == "nt":
import msvcrt, ctypes
handle = msvcrt.get_osfhandle(sys.stdout.fileno())
size = ctypes.c_ulong(1024)
nameBuffer = ctypes.create_string_buffer(size.value)
ctypes.windll.kernel32.GetFinalPathNameByHandleA(handle, nameBuffer, size, 4)
return nameBuffer.value
else:
return os.readlink('/proc/self/fd/1')
    except Exception:
return ""
def detectHtmlOutputType(requestedType):
if requestedType in ['txt', 'markdown']:
return False
elif requestedType in ["html", "moinwiki"]:
return True
else:
if sys.stdout.isatty():
return False
else:
outname = getStdoutFilename()
if outname:
if outname.endswith(".htm") or outname.endswith(".html"):
return True
else:
return False
else:
return False
def getRelativeVal(test, test0, metric):
if not test or not test0:
return None
val0 = test0.get(metric, "s")
if not val0:
return None
val = test.get(metric, "s")
if not val or val == 0:
return None
return float(val0)/val
def getCycleReduction(test, test0, metric):
if not test or not test0:
return None
val0 = test0.get(metric, "s")
if not val0 or val0 == 0:
return None
val = test.get(metric, "s")
if not val:
return None
return (1.0-float(val)/val0)*100
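# getScore normalizes the difference of log-means by the pooled log-standard-
# deviation: score = (ln(gmean) - ln(gmean0)) / sqrt(gstddev^2 + gstddev0^2).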
def getScore(test, test0, metric):
if not test or not test0:
return None
    m0 = test.get("gmean", None)
    m1 = test0.get("gmean", None)
    s0 = test.get("gstddev", None)
    s1 = test0.get("gstddev", None)
    if not m0 or not m1 or s0 is None or s1 is None:
        return None
    m0, m1, s0, s1 = float(m0), float(m1), float(s0), float(s1)
s = math.sqrt(s0*s0 + s1*s1)
m0 = math.log(m0)
m1 = math.log(m1)
if s == 0:
return None
return (m0-m1)/s
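# Metric name suffix conventions used below: a trailing '%' denotes a value
# relative to the baseline run (baseline time / current time), and a trailing
# '$' denotes cycle reduction, i.e. (1 - current/baseline) * 100 percent.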
metrix_table = \
{
"name": ("Name of Test", lambda test,test0,units: str(test)),
"samples": ("Number of\ncollected samples", lambda test,test0,units: test.get("samples", units)),
"outliers": ("Number of\noutliers", lambda test,test0,units: test.get("outliers", units)),
"gmean": ("Geometric mean", lambda test,test0,units: test.get("gmean", units)),
"mean": ("Mean", lambda test,test0,units: test.get("mean", units)),
"min": ("Min", lambda test,test0,units: test.get("min", units)),
"median": ("Median", lambda test,test0,units: test.get("median", units)),
"stddev": ("Standard deviation", lambda test,test0,units: test.get("stddev", units)),
"gstddev": ("Standard deviation of Ln(time)", lambda test,test0,units: test.get("gstddev")),
"gmean%": ("Geometric mean (relative)", lambda test,test0,units: getRelativeVal(test, test0, "gmean")),
"mean%": ("Mean (relative)", lambda test,test0,units: getRelativeVal(test, test0, "mean")),
"min%": ("Min (relative)", lambda test,test0,units: getRelativeVal(test, test0, "min")),
"median%": ("Median (relative)", lambda test,test0,units: getRelativeVal(test, test0, "median")),
"stddev%": ("Standard deviation (relative)", lambda test,test0,units: getRelativeVal(test, test0, "stddev")),
"gstddev%": ("Standard deviation of Ln(time) (relative)", lambda test,test0,units: getRelativeVal(test, test0, "gstddev")),
"gmean$": ("Geometric mean (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "gmean")),
"mean$": ("Mean (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "mean")),
"min$": ("Min (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "min")),
"median$": ("Median (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "median")),
"stddev$": ("Standard deviation (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "stddev")),
"gstddev$": ("Standard deviation of Ln(time) (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "gstddev")),
"score": ("SCORE", lambda test,test0,units: getScore(test, test0, "gstddev")),
}
def formatValue(val, metric, units = None):
if val is None:
return "-"
if metric.endswith("%"):
return "%.2f" % val
if metric.endswith("$"):
return "%.2f%%" % val
if metric.endswith("S"):
if val > 3.5:
return "SLOWER"
if val < -3.5:
return "FASTER"
if val > -1.5 and val < 1.5:
return " "
if val < 0:
return "faster"
if val > 0:
return "slower"
#return "%.4f" % val
if units:
return "%.3f %s" % (val, units)
else:
return "%.3f" % val
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml")
exit(0)
parser = OptionParser()
parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html', 'markdown' or 'auto' - default)", metavar="FMT", default="auto")
parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
(options, args) = parser.parse_args()
options.generateHtml = detectHtmlOutputType(options.format)
if options.metric not in metrix_table:
options.metric = "gmean"
#print options
#print args
# tbl = table()
# tbl.newColumn("first", "qqqq", align = "left")
# tbl.newColumn("second", "wwww\nz\nx\n")
# tbl.newColumn("third", "wwasdas")
#
# tbl.newCell(0, "ccc111", align = "right")
# tbl.newCell(1, "dddd1")
# tbl.newCell(2, "8768756754")
# tbl.newRow()
# tbl.newCell(0, "1\n2\n3\n4\n5\n6\n7", align = "center", colspan = 2, rowspan = 2)
# tbl.newCell(2, "xxx\nqqq", align = "center", colspan = 1, valign = "middle")
# tbl.newRow()
# tbl.newCell(2, "+", align = "center", colspan = 1, valign = "middle")
# tbl.newRow()
# tbl.newCell(0, "vcvvbasdsadassdasdasv", align = "right", colspan = 2)
# tbl.newCell(2, "dddd1")
# tbl.newRow()
# tbl.newCell(0, "vcvvbv")
# tbl.newCell(1, "3445324", align = "right")
# tbl.newCell(2, None)
# tbl.newCell(1, "0000")
# if sys.stdout.isatty():
# tbl.consolePrintTable(sys.stdout)
# else:
# htmlPrintHeader(sys.stdout)
# tbl.htmlPrintTable(sys.stdout)
# htmlPrintFooter(sys.stdout)
import testlog_parser
if options.generateHtml:
htmlPrintHeader(sys.stdout, "Tables demo")
getter = metrix_table[options.metric][1]
for arg in args:
tests = testlog_parser.parseLogFile(arg)
tbl = table(arg, format=options.format)
tbl.newColumn("name", "Name of Test", align = "left")
tbl.newColumn("value", metrix_table[options.metric][0], align = "center", bold = "true")
for t in sorted(tests):
tbl.newRow()
tbl.newCell("name", str(t))
status = t.get("status")
if status != "run":
tbl.newCell("value", status)
else:
val = getter(t, None, options.units)
if val:
if options.metric.endswith("%"):
tbl.newCell("value", "%.2f" % val, val)
else:
tbl.newCell("value", "%.3f %s" % (val, options.units), val)
else:
tbl.newCell("value", "-")
if options.generateHtml:
tbl.htmlPrintTable(sys.stdout)
else:
tbl.consolePrintTable(sys.stdout)
if options.generateHtml:
htmlPrintFooter(sys.stdout)

@@ -0,0 +1,232 @@
#!/usr/bin/env python
from __future__ import print_function
import collections
import re
import os.path
import sys
from xml.dom.minidom import parse
if sys.version_info > (3,):
long = int
def cmp(a, b): return (a>b)-(a<b)
class TestInfo(object):
def __init__(self, xmlnode):
self.fixture = xmlnode.getAttribute("classname")
self.name = xmlnode.getAttribute("name")
self.value_param = xmlnode.getAttribute("value_param")
self.type_param = xmlnode.getAttribute("type_param")
custom_status = xmlnode.getAttribute("custom_status")
failures = xmlnode.getElementsByTagName("failure")
if len(custom_status) > 0:
self.status = custom_status
elif len(failures) > 0:
self.status = "failed"
else:
self.status = xmlnode.getAttribute("status")
if self.name.startswith("DISABLED_"):
if self.status == 'notrun':
self.status = "disabled"
self.fixture = self.fixture.replace("DISABLED_", "")
self.name = self.name.replace("DISABLED_", "")
self.properties = {
prop.getAttribute("name") : prop.getAttribute("value")
for prop in xmlnode.getElementsByTagName("property")
if prop.hasAttribute("name") and prop.hasAttribute("value")
}
self.metrix = {}
self.parseLongMetric(xmlnode, "bytesIn");
self.parseLongMetric(xmlnode, "bytesOut");
self.parseIntMetric(xmlnode, "samples");
self.parseIntMetric(xmlnode, "outliers");
self.parseFloatMetric(xmlnode, "frequency", 1);
self.parseLongMetric(xmlnode, "min");
self.parseLongMetric(xmlnode, "median");
self.parseLongMetric(xmlnode, "gmean");
self.parseLongMetric(xmlnode, "mean");
self.parseLongMetric(xmlnode, "stddev");
self.parseFloatMetric(xmlnode, "gstddev");
self.parseFloatMetric(xmlnode, "time");
self.parseLongMetric(xmlnode, "total_memory_usage");
def parseLongMetric(self, xmlnode, name, default = 0):
if name in self.properties:
self.metrix[name] = long(self.properties[name])
elif xmlnode.hasAttribute(name):
self.metrix[name] = long(xmlnode.getAttribute(name))
else:
self.metrix[name] = default
def parseIntMetric(self, xmlnode, name, default = 0):
if name in self.properties:
self.metrix[name] = int(self.properties[name])
elif xmlnode.hasAttribute(name):
self.metrix[name] = int(xmlnode.getAttribute(name))
else:
self.metrix[name] = default
def parseFloatMetric(self, xmlnode, name, default = 0):
if name in self.properties:
self.metrix[name] = float(self.properties[name])
elif xmlnode.hasAttribute(name):
self.metrix[name] = float(xmlnode.getAttribute(name))
else:
self.metrix[name] = default
def parseStringMetric(self, xmlnode, name, default = None):
if name in self.properties:
self.metrix[name] = self.properties[name].strip()
elif xmlnode.hasAttribute(name):
self.metrix[name] = xmlnode.getAttribute(name).strip()
else:
self.metrix[name] = default
def get(self, name, units="ms"):
if name == "classname":
return self.fixture
if name == "name":
return self.name
if name == "fullname":
return self.__str__()
if name == "value_param":
return self.value_param
if name == "type_param":
return self.type_param
if name == "status":
return self.status
val = self.metrix.get(name, None)
if not val:
return val
if name == "time":
return self.metrix.get("time")
if name in ["gmean", "min", "mean", "median", "stddev"]:
scale = 1.0
frequency = self.metrix.get("frequency", 1.0) or 1.0
if units == "ms":
scale = 1000.0
if units == "us" or units == "mks": # mks is typo error for microsecond (<= OpenCV 3.4)
scale = 1000000.0
if units == "ns":
scale = 1000000000.0
if units == "ticks":
frequency = long(1)
scale = long(1)
return val * scale / frequency
return val
def dump(self, units="ms"):
print("%s ->\t\033[1;31m%s\033[0m = \t%.2f%s" % (str(self), self.status, self.get("gmean", units), units))
def getName(self):
pos = self.name.find("/")
if pos > 0:
return self.name[:pos]
return self.name
def getFixture(self):
if self.fixture.endswith(self.getName()):
fixture = self.fixture[:-len(self.getName())]
else:
fixture = self.fixture
if fixture.endswith("_"):
fixture = fixture[:-1]
return fixture
def param(self):
return '::'.join(filter(None, [self.type_param, self.value_param]))
def shortName(self):
name = self.getName()
fixture = self.getFixture()
return '::'.join(filter(None, [name, fixture]))
def __str__(self):
name = self.getName()
fixture = self.getFixture()
return '::'.join(filter(None, [name, fixture, self.type_param, self.value_param]))
def __cmp__(self, other):
        r = cmp(self.fixture, other.fixture)
if r != 0:
return r
if self.type_param:
if other.type_param:
                r = cmp(self.type_param, other.type_param)
if r != 0:
return r
else:
return -1
else:
if other.type_param:
return 1
if self.value_param:
if other.value_param:
                r = cmp(self.value_param, other.value_param)
if r != 0:
return r
else:
return -1
else:
if other.value_param:
return 1
return 0
# This is a Sequence for compatibility with old scripts,
# which treat parseLogFile's return value as a list.
# (collections.Sequence was removed in Python 3.10, so import from collections.abc.)
try:
    from collections.abc import Sequence as _Sequence  # Python 3.3+
except ImportError:  # Python 2
    from collections import Sequence as _Sequence
class TestRunInfo(_Sequence):
def __init__(self, properties, tests):
self.properties = properties
self.tests = tests
def __len__(self):
return len(self.tests)
def __getitem__(self, key):
return self.tests[key]
def parseLogFile(filename):
log = parse(filename)
properties = {
attr_name[3:]: attr_value
for (attr_name, attr_value) in log.documentElement.attributes.items()
if attr_name.startswith('cv_')
}
tests = list(map(TestInfo, log.getElementsByTagName("testcase")))
return TestRunInfo(properties, tests)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml")
exit(0)
for arg in sys.argv[1:]:
print("Processing {}...".format(arg))
run = parseLogFile(arg)
print("Properties:")
for (prop_name, prop_value) in run.properties.items():
print("\t{} = {}".format(prop_name, prop_value))
print("Tests:")
for t in sorted(run.tests):
t.dump()
print()

@@ -0,0 +1,440 @@
from __future__ import print_function
import os
import sys
import csv
from pprint import pprint
from collections import deque
try:
long # Python 2
except NameError:
long = int # Python 3
# trace.hpp
REGION_FLAG_IMPL_MASK = 15 << 16
REGION_FLAG_IMPL_IPP = 1 << 16
REGION_FLAG_IMPL_OPENCL = 2 << 16
DEBUG = False
if DEBUG:
dprint = print
dpprint = pprint
else:
    def dprint(*args, **kwargs):
        pass
    def dpprint(*args, **kwargs):
        pass
def tryNum(s):
if s.startswith('0x'):
try:
return int(s, 16)
except ValueError:
pass
try:
return int(s)
except ValueError:
pass
if sys.version_info[0] < 3:
try:
return long(s)
except ValueError:
pass
return s
def formatTimestamp(t):
return "%.3f" % (t * 1e-6)
try:
from statistics import median
except ImportError:
def median(lst):
sortedLst = sorted(lst)
lstLen = len(lst)
index = (lstLen - 1) // 2
if (lstLen % 2):
return sortedLst[index]
else:
return (sortedLst[index] + sortedLst[index + 1]) * 0.5
def getCXXFunctionName(spec):
def dropParams(spec):
pos = len(spec) - 1
depth = 0
while pos >= 0:
if spec[pos] == ')':
depth = depth + 1
elif spec[pos] == '(':
depth = depth - 1
if depth == 0:
if pos == 0 or spec[pos - 1] in ['#', ':']:
res = dropParams(spec[pos+1:-1])
return (spec[:pos] + res[0], res[1])
return (spec[:pos], spec[pos:])
pos = pos - 1
return (spec, '')
def extractName(spec):
pos = len(spec) - 1
inName = False
while pos >= 0:
if spec[pos] == ' ':
if inName:
return spec[pos+1:]
elif spec[pos].isalnum():
inName = True
pos = pos - 1
return spec
if spec.startswith('IPP') or spec.startswith('OpenCL'):
prefix_size = len('IPP') if spec.startswith('IPP') else len('OpenCL')
prefix = spec[:prefix_size]
if prefix_size < len(spec) and spec[prefix_size] in ['#', ':']:
prefix = prefix + spec[prefix_size]
prefix_size = prefix_size + 1
begin = prefix_size
while begin < len(spec):
if spec[begin].isalnum() or spec[begin] in ['_', ':']:
break
begin = begin + 1
if begin == len(spec):
return spec
end = begin
while end < len(spec):
if not (spec[end].isalnum() or spec[end] in ['_', ':']):
break
end = end + 1
return prefix + spec[begin:end]
spec = spec.replace(') const', ')') # const methods
(ret_type_name, params) = dropParams(spec)
name = extractName(ret_type_name)
if 'operator' in name:
return name + params
if name.startswith('&'):
return name[1:]
return name
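# Illustrative (hypothetical) inputs: "void cv::resize(InputArray, OutputArray)"
# yields "cv::resize", while implementation-prefixed specs such as "IPP#ippiResize"
# keep their prefix and are returned unchanged.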
stack_size = 10
class Trace:
def __init__(self, filename=None):
self.tasks = {}
self.tasks_list = []
self.locations = {}
self.threads_stack = {}
self.pending_files = deque()
if filename:
self.load(filename)
class TraceTask:
def __init__(self, threadID, taskID, locationID, beginTimestamp):
self.threadID = threadID
self.taskID = taskID
self.locationID = locationID
self.beginTimestamp = beginTimestamp
self.endTimestamp = None
self.parentTaskID = None
self.parentThreadID = None
self.childTask = []
self.selfTimeIPP = 0
self.selfTimeOpenCL = 0
self.totalTimeIPP = 0
self.totalTimeOpenCL = 0
def __repr__(self):
return "TID={} ID={} loc={} parent={}:{} begin={} end={} IPP={}/{} OpenCL={}/{}".format(
self.threadID, self.taskID, self.locationID, self.parentThreadID, self.parentTaskID,
self.beginTimestamp, self.endTimestamp, self.totalTimeIPP, self.selfTimeIPP, self.totalTimeOpenCL, self.selfTimeOpenCL)
class TraceLocation:
def __init__(self, locationID, filename, line, name, flags):
self.locationID = locationID
self.filename = os.path.split(filename)[1]
self.line = line
self.name = getCXXFunctionName(name)
self.flags = flags
def __str__(self):
return "{}#{}:{}".format(self.name, self.filename, self.line)
def __repr__(self):
return "ID={} {}:{}:{}".format(self.locationID, self.filename, self.line, self.name)
def parse_file(self, filename):
dprint("Process file: '{}'".format(filename))
with open(filename) as infile:
for line in infile:
                line = str(line).strip()
                if not line:
                    continue
                if line[0] == "#":
if line.startswith("#thread file:"):
name = str(line.split(':', 1)[1]).strip()
self.pending_files.append(os.path.join(os.path.split(filename)[0], name))
continue
self.parse_line(line)
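    # Each trace record is a comma-separated line: 'l' records describe source
    # locations (l,<locationID>,<filename>,<line>,<name>,<flags>), while 'b'/'e'
    # records mark region begin/end (<b|e>,<threadID>,<timestamp>,<locationID>,
    # <taskID>, optionally followed by key=value extras such as parent=...,
    # tIPP=... or tOCL=...).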
def parse_line(self, line):
opts = line.split(',')
dpprint(opts)
if opts[0] == 'l':
            opts = list(csv.reader([line]))[0] # re-parse with csv to handle quoted fields
locationID = int(opts[1])
filename = str(opts[2])
line = int(opts[3])
name = opts[4]
flags = tryNum(opts[5])
self.locations[locationID] = self.TraceLocation(locationID, filename, line, name, flags)
return
extra_opts = {}
for e in opts[5:]:
if not '=' in e:
continue
(k, v) = e.split('=')
extra_opts[k] = tryNum(v)
if extra_opts:
dpprint(extra_opts)
threadID = None
taskID = None
locationID = None
ts = None
if opts[0] in ['b', 'e']:
threadID = int(opts[1])
taskID = int(opts[4])
locationID = int(opts[3])
ts = tryNum(opts[2])
thread_stack = None
currentTask = (None, None)
if threadID is not None:
if not threadID in self.threads_stack:
thread_stack = deque()
self.threads_stack[threadID] = thread_stack
else:
thread_stack = self.threads_stack[threadID]
currentTask = None if not thread_stack else thread_stack[-1]
t = (threadID, taskID)
if opts[0] == 'b':
assert not t in self.tasks, "Duplicate task: " + str(t) + repr(self.tasks[t])
task = self.TraceTask(threadID, taskID, locationID, ts)
self.tasks[t] = task
self.tasks_list.append(task)
thread_stack.append((threadID, taskID))
if currentTask:
task.parentThreadID = currentTask[0]
task.parentTaskID = currentTask[1]
if 'parentThread' in extra_opts:
task.parentThreadID = extra_opts['parentThread']
if 'parent' in extra_opts:
task.parentTaskID = extra_opts['parent']
if opts[0] == 'e':
task = self.tasks[t]
task.endTimestamp = ts
if 'tIPP' in extra_opts:
task.selfTimeIPP = extra_opts['tIPP']
if 'tOCL' in extra_opts:
task.selfTimeOpenCL = extra_opts['tOCL']
thread_stack.pop()
def load(self, filename):
self.pending_files.append(filename)
if DEBUG:
with open(filename, 'r') as f:
print(f.read(), end='')
while self.pending_files:
self.parse_file(self.pending_files.pop())
def getParentTask(self, task):
return self.tasks.get((task.parentThreadID, task.parentTaskID), None)
def process(self):
self.tasks_list.sort(key=lambda x: x.beginTimestamp)
parallel_for_location = None
for (id, l) in self.locations.items():
if l.name == 'parallel_for':
parallel_for_location = l.locationID
break
for task in self.tasks_list:
try:
task.duration = task.endTimestamp - task.beginTimestamp
task.selfDuration = task.duration
            except TypeError: # endTimestamp is None for tasks that never ended
task.duration = None
task.selfDuration = None
task.totalTimeIPP = task.selfTimeIPP
task.totalTimeOpenCL = task.selfTimeOpenCL
dpprint(self.tasks)
dprint("Calculate total times")
for task in self.tasks_list:
parentTask = self.getParentTask(task)
if parentTask:
parentTask.selfDuration = parentTask.selfDuration - task.duration
parentTask.childTask.append(task)
timeIPP = task.selfTimeIPP
timeOpenCL = task.selfTimeOpenCL
while parentTask:
if parentTask.locationID == parallel_for_location: # TODO parallel_for
break
parentLocation = self.locations[parentTask.locationID]
if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_IPP:
parentTask.selfTimeIPP = parentTask.selfTimeIPP - timeIPP
timeIPP = 0
else:
parentTask.totalTimeIPP = parentTask.totalTimeIPP + timeIPP
if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_OPENCL:
parentTask.selfTimeOpenCL = parentTask.selfTimeOpenCL - timeOpenCL
timeOpenCL = 0
else:
parentTask.totalTimeOpenCL = parentTask.totalTimeOpenCL + timeOpenCL
parentTask = self.getParentTask(parentTask)
dpprint(self.tasks)
dprint("Calculate total times (parallel_for)")
for task in self.tasks_list:
if task.locationID == parallel_for_location:
task.selfDuration = 0
childDuration = sum([t.duration for t in task.childTask])
if task.duration == 0 or childDuration == 0:
continue
timeCoef = task.duration / float(childDuration)
childTimeIPP = sum([t.totalTimeIPP for t in task.childTask])
childTimeOpenCL = sum([t.totalTimeOpenCL for t in task.childTask])
if childTimeIPP == 0 and childTimeOpenCL == 0:
continue
timeIPP = childTimeIPP * timeCoef
timeOpenCL = childTimeOpenCL * timeCoef
parentTask = task
while parentTask:
parentLocation = self.locations[parentTask.locationID]
if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_IPP:
parentTask.selfTimeIPP = parentTask.selfTimeIPP - timeIPP
timeIPP = 0
else:
parentTask.totalTimeIPP = parentTask.totalTimeIPP + timeIPP
if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_OPENCL:
parentTask.selfTimeOpenCL = parentTask.selfTimeOpenCL - timeOpenCL
timeOpenCL = 0
else:
parentTask.totalTimeOpenCL = parentTask.totalTimeOpenCL + timeOpenCL
parentTask = self.getParentTask(parentTask)
dpprint(self.tasks)
dprint("Done")
def dump(self, max_entries):
assert isinstance(max_entries, int)
class CallInfo():
def __init__(self, callID):
self.callID = callID
self.totalTimes = []
self.selfTimes = []
self.threads = set()
self.selfTimesIPP = []
self.selfTimesOpenCL = []
self.totalTimesIPP = []
self.totalTimesOpenCL = []
calls = {}
for currentTask in self.tasks_list:
task = currentTask
callID = []
for i in range(stack_size):
callID.append(task.locationID)
task = self.getParentTask(task)
if not task:
break
callID = tuple(callID)
if not callID in calls:
call = CallInfo(callID)
calls[callID] = call
else:
call = calls[callID]
call.totalTimes.append(currentTask.duration)
call.selfTimes.append(currentTask.selfDuration)
call.threads.add(currentTask.threadID)
call.selfTimesIPP.append(currentTask.selfTimeIPP)
call.selfTimesOpenCL.append(currentTask.selfTimeOpenCL)
call.totalTimesIPP.append(currentTask.totalTimeIPP)
call.totalTimesOpenCL.append(currentTask.totalTimeOpenCL)
dpprint(self.tasks)
dpprint(self.locations)
dpprint(calls)
calls_self_sum = {k: sum(v.selfTimes) for (k, v) in calls.items()}
calls_total_sum = {k: sum(v.totalTimes) for (k, v) in calls.items()}
calls_median = {k: median(v.selfTimes) for (k, v) in calls.items()}
calls_sorted = sorted(calls.keys(), key=lambda x: calls_self_sum[x], reverse=True)
calls_self_sum_IPP = {k: sum(v.selfTimesIPP) for (k, v) in calls.items()}
calls_total_sum_IPP = {k: sum(v.totalTimesIPP) for (k, v) in calls.items()}
calls_self_sum_OpenCL = {k: sum(v.selfTimesOpenCL) for (k, v) in calls.items()}
calls_total_sum_OpenCL = {k: sum(v.totalTimesOpenCL) for (k, v) in calls.items()}
if max_entries > 0 and len(calls_sorted) > max_entries:
calls_sorted = calls_sorted[:max_entries]
def formatPercents(p):
if p is not None:
return "{:>3d}".format(int(p*100))
return ''
name_width = 70
timestamp_width = 12
def fmtTS():
return '{:>' + str(timestamp_width) + '}'
fmt = "{:>3} {:<"+str(name_width)+"} {:>8} {:>3}"+((' '+fmtTS())*5)+((' '+fmtTS()+' {:>3}')*2)
fmt2 = "{:>3} {:<"+str(name_width)+"} {:>8} {:>3}"+((' '+fmtTS())*5)+((' '+fmtTS()+' {:>3}')*2)
print(fmt.format("ID", "name", "count", "thr", "min", "max", "median", "avg", "*self*", "IPP", "%", "OpenCL", "%"))
print(fmt2.format("", "", "", "", "t-min", "t-max", "t-median", "t-avg", "total", "t-IPP", "%", "t-OpenCL", "%"))
for (index, callID) in enumerate(calls_sorted):
call_self_times = calls[callID].selfTimes
loc0 = self.locations[callID[0]]
loc_array = [] # [str(callID)]
for (i, l) in enumerate(callID):
loc = self.locations[l]
loc_array.append(loc.name if i > 0 else str(loc))
loc_str = '|'.join(loc_array)
if len(loc_str) > name_width: loc_str = loc_str[:name_width-3]+'...'
print(fmt.format(index + 1, loc_str, len(call_self_times),
len(calls[callID].threads),
formatTimestamp(min(call_self_times)),
formatTimestamp(max(call_self_times)),
formatTimestamp(calls_median[callID]),
formatTimestamp(sum(call_self_times)/float(len(call_self_times))),
formatTimestamp(sum(call_self_times)),
formatTimestamp(calls_self_sum_IPP[callID]),
formatPercents(calls_self_sum_IPP[callID] / float(calls_self_sum[callID])) if calls_self_sum[callID] > 0 else formatPercents(None),
formatTimestamp(calls_self_sum_OpenCL[callID]),
formatPercents(calls_self_sum_OpenCL[callID] / float(calls_self_sum[callID])) if calls_self_sum[callID] > 0 else formatPercents(None),
))
call_total_times = calls[callID].totalTimes
print(fmt2.format("", "", "", "",
formatTimestamp(min(call_total_times)),
formatTimestamp(max(call_total_times)),
formatTimestamp(median(call_total_times)),
formatTimestamp(sum(call_total_times)/float(len(call_total_times))),
formatTimestamp(sum(call_total_times)),
formatTimestamp(calls_total_sum_IPP[callID]),
formatPercents(calls_total_sum_IPP[callID] / float(calls_total_sum[callID])) if calls_total_sum[callID] > 0 else formatPercents(None),
formatTimestamp(calls_total_sum_OpenCL[callID]),
formatPercents(calls_total_sum_OpenCL[callID] / float(calls_total_sum[callID])) if calls_total_sum[callID] > 0 else formatPercents(None),
))
print()
if __name__ == "__main__":
tracefile = sys.argv[1] if len(sys.argv) > 1 else 'OpenCVTrace.txt'
count = int(sys.argv[2]) if len(sys.argv) > 2 else 10
trace = Trace(tracefile)
trace.process()
trace.dump(max_entries = count)
print("OK")

@@ -0,0 +1,374 @@
#!/usr/bin/env python
"""
This script can generate XLS reports from OpenCV tests' XML output files.
To use it, first, create a directory for each machine you ran tests on.
Each such directory will become a sheet in the report. Put each XML file
into the corresponding directory.
Then, create your configuration file(s). You can have a global configuration
file (specified with the -c option), and per-sheet configuration files, which
must be called sheet.conf and placed in the directory corresponding to the sheet.
The settings in the per-sheet configuration file will override those in the
global configuration file, if both are present.
A configuration file must consist of a Python dictionary. The following keys
will be recognized:
* 'comparisons': [{'from': string, 'to': string}]
List of configurations to compare performance between. For each item,
the sheet will have a column showing speedup from configuration named
'from' to configuration named "to".
* 'configuration_matchers': [{'properties': {string: object}, 'name': string}]
Instructions for matching test run property sets to configuration names.
For each found XML file:
1) All attributes of the root element starting with the prefix 'cv_' are
placed in a dictionary, with the cv_ prefix stripped and the cv_module_name
element deleted.
   2) The first matcher whose 'properties' dictionary is contained in the XML
      file's property set (same keys with equal values) is selected.
A missing property can be matched by using None as the value.
Corollary 1: you should place more specific matchers before less specific
ones.
Corollary 2: an empty 'properties' dictionary matches every property set.
3) If a matching matcher is found, its 'name' string is presumed to be the name
of the configuration the XML file corresponds to. A warning is printed if
two different property sets match to the same configuration name.
   4) If no such matcher is found and --include-unmatched was specified, the
      configuration name is assumed to be the relative path from the sheet's
      directory to the XML file's containing directory. If the XML file is
      directly inside the sheet's directory, the configuration name is instead
      a dump of all its properties. If --include-unmatched wasn't specified,
      the XML file is ignored and a warning is printed.
* 'configurations': [string]
List of names for compile-time and runtime configurations of OpenCV.
Each item will correspond to a column of the sheet.
* 'module_colors': {string: string}
Mapping from module name to color name. In the sheet, cells containing module
names from this mapping will be colored with the corresponding color. You can
find the list of available colors here:
<http://www.simplistix.co.uk/presentations/python-excel.pdf>.
* 'sheet_name': string
Name for the sheet. If this parameter is missing, the name of sheet's directory
will be used.
* 'sheet_properties': [(string, string)]
List of arbitrary (key, value) pairs that somehow describe the sheet. Will be
dumped into the first row of the sheet in string form.
Note that all keys are optional, although to get useful results, you'll want to
specify at least 'configurations' and 'configuration_matchers'.
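For illustration, a minimal configuration might look like this (the property
and configuration names here are hypothetical):

    {
        'configurations': ['vanilla', 'optimized'],
        'configuration_matchers': [
            {'properties': {'parallel_framework': None}, 'name': 'vanilla'},
            {'properties': {}, 'name': 'optimized'},
        ],
        'comparisons': [{'from': 'vanilla', 'to': 'optimized'}],
    }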
Finally, run the script. Use the --help option for usage information.
"""
from __future__ import division
import ast
import errno
import fnmatch
import logging
import numbers
import os, os.path
import re
from argparse import ArgumentParser
from glob import glob
from itertools import ifilter
import xlwt
from testlog_parser import parseLogFile
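# Note: this script is Python 2 only - itertools.ifilter and dict.iteritems,
# used throughout, do not exist in Python 3.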
re_image_size = re.compile(r'^ \d+ x \d+$', re.VERBOSE)
re_data_type = re.compile(r'^ (?: 8 | 16 | 32 | 64 ) [USF] C [1234] $', re.VERBOSE)
time_style = xlwt.easyxf(num_format_str='#0.00')
no_time_style = xlwt.easyxf('pattern: pattern solid, fore_color gray25')
failed_style = xlwt.easyxf('pattern: pattern solid, fore_color red')
noimpl_style = xlwt.easyxf('pattern: pattern solid, fore_color orange')
style_dict = {"failed": failed_style, "noimpl":noimpl_style}
speedup_style = time_style
good_speedup_style = xlwt.easyxf('font: color green', num_format_str='#0.00')
bad_speedup_style = xlwt.easyxf('font: color red', num_format_str='#0.00')
no_speedup_style = no_time_style
error_speedup_style = xlwt.easyxf('pattern: pattern solid, fore_color orange')
header_style = xlwt.easyxf('font: bold true; alignment: horizontal centre, vertical top, wrap True')
subheader_style = xlwt.easyxf('alignment: horizontal centre, vertical top')
class Collector(object):
def __init__(self, config_match_func, include_unmatched):
self.__config_cache = {}
self.config_match_func = config_match_func
self.include_unmatched = include_unmatched
self.tests = {}
self.extra_configurations = set()
# Format a sorted sequence of pairs as if it was a dictionary.
# We can't just use a dictionary instead, since we want to preserve the sorted order of the keys.
@staticmethod
def __format_config_cache_key(pairs, multiline=False):
return (
('{\n' if multiline else '{') +
(',\n' if multiline else ', ').join(
(' ' if multiline else '') + repr(k) + ': ' + repr(v) for (k, v) in pairs) +
('\n}\n' if multiline else '}')
)
def collect_from(self, xml_path, default_configuration):
run = parseLogFile(xml_path)
module = run.properties['module_name']
properties = run.properties.copy()
del properties['module_name']
props_key = tuple(sorted(properties.iteritems())) # dicts can't be keys
if props_key in self.__config_cache:
configuration = self.__config_cache[props_key]
else:
configuration = self.config_match_func(properties)
if configuration is None:
if self.include_unmatched:
if default_configuration is not None:
configuration = default_configuration
else:
configuration = Collector.__format_config_cache_key(props_key, multiline=True)
self.extra_configurations.add(configuration)
else:
logging.warning('failed to match properties to a configuration: %s',
Collector.__format_config_cache_key(props_key))
else:
same_config_props = [it[0] for it in self.__config_cache.iteritems() if it[1] == configuration]
if len(same_config_props) > 0:
logging.warning('property set %s matches the same configuration %r as property set %s',
Collector.__format_config_cache_key(props_key),
configuration,
Collector.__format_config_cache_key(same_config_props[0]))
self.__config_cache[props_key] = configuration
if configuration is None: return
module_tests = self.tests.setdefault(module, {})
for test in run.tests:
test_results = module_tests.setdefault((test.shortName(), test.param()), {})
new_result = test.get("gmean") if test.status == 'run' else test.status
test_results[configuration] = min(
test_results.get(configuration), new_result,
key=lambda r: (1, r) if isinstance(r, numbers.Number) else
(2,) if r is not None else
(3,)
) # prefer lower result; prefer numbers to errors and errors to nothing
def make_match_func(matchers):
def match_func(properties):
for matcher in matchers:
if all(properties.get(name) == value
for (name, value) in matcher['properties'].iteritems()):
return matcher['name']
return None
return match_func
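# Hypothetical usage: make_match_func([{'properties': {'parallel_framework': 'tbb'},
# 'name': 'tbb_build'}]) returns a function that maps every property set whose
# (cv_-prefix-stripped) 'parallel_framework' equals 'tbb' to the name 'tbb_build'.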
def main():
arg_parser = ArgumentParser(description='Build an XLS performance report.')
arg_parser.add_argument('sheet_dirs', nargs='+', metavar='DIR', help='directory containing perf test logs')
arg_parser.add_argument('-o', '--output', metavar='XLS', default='report.xls', help='name of output file')
arg_parser.add_argument('-c', '--config', metavar='CONF', help='global configuration file')
arg_parser.add_argument('--include-unmatched', action='store_true',
help='include results from XML files that were not recognized by configuration matchers')
arg_parser.add_argument('--show-times-per-pixel', action='store_true',
help='for tests that have an image size parameter, show per-pixel time, as well as total time')
args = arg_parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
if args.config is not None:
with open(args.config) as global_conf_file:
global_conf = ast.literal_eval(global_conf_file.read())
else:
global_conf = {}
wb = xlwt.Workbook()
for sheet_path in args.sheet_dirs:
try:
with open(os.path.join(sheet_path, 'sheet.conf')) as sheet_conf_file:
sheet_conf = ast.literal_eval(sheet_conf_file.read())
except IOError as ioe:
if ioe.errno != errno.ENOENT: raise
sheet_conf = {}
logging.debug('no sheet.conf for %s', sheet_path)
sheet_conf = dict(global_conf.items() + sheet_conf.items())
config_names = sheet_conf.get('configurations', [])
config_matchers = sheet_conf.get('configuration_matchers', [])
collector = Collector(make_match_func(config_matchers), args.include_unmatched)
for root, _, filenames in os.walk(sheet_path):
logging.info('looking in %s', root)
for filename in fnmatch.filter(filenames, '*.xml'):
if os.path.normpath(sheet_path) == os.path.normpath(root):
default_conf = None
else:
default_conf = os.path.relpath(root, sheet_path)
collector.collect_from(os.path.join(root, filename), default_conf)
config_names.extend(sorted(collector.extra_configurations - set(config_names)))
sheet = wb.add_sheet(sheet_conf.get('sheet_name', os.path.basename(os.path.abspath(sheet_path))))
sheet_properties = sheet_conf.get('sheet_properties', [])
sheet.write(0, 0, 'Properties:')
sheet.write(0, 1,
'N/A' if len(sheet_properties) == 0 else
' '.join(str(k) + '=' + repr(v) for (k, v) in sheet_properties))
sheet.row(2).height = 800
sheet.panes_frozen = True
sheet.remove_splits = True
sheet_comparisons = sheet_conf.get('comparisons', [])
row = 2
col = 0
for (w, caption) in [
(2500, 'Module'),
(10000, 'Test'),
(2000, 'Image\nwidth'),
(2000, 'Image\nheight'),
(2000, 'Data\ntype'),
(7500, 'Other parameters')]:
sheet.col(col).width = w
if args.show_times_per_pixel:
sheet.write_merge(row, row + 1, col, col, caption, header_style)
else:
sheet.write(row, col, caption, header_style)
col += 1
for config_name in config_names:
if args.show_times_per_pixel:
sheet.col(col).width = 3000
sheet.col(col + 1).width = 3000
sheet.write_merge(row, row, col, col + 1, config_name, header_style)
sheet.write(row + 1, col, 'total, ms', subheader_style)
sheet.write(row + 1, col + 1, 'per pixel, ns', subheader_style)
col += 2
else:
sheet.col(col).width = 4000
sheet.write(row, col, config_name, header_style)
col += 1
col += 1 # blank column between configurations and comparisons
for comp in sheet_comparisons:
sheet.col(col).width = 4000
caption = comp['to'] + '\nvs\n' + comp['from']
if args.show_times_per_pixel:
sheet.write_merge(row, row + 1, col, col, caption, header_style)
else:
sheet.write(row, col, caption, header_style)
col += 1
row += 2 if args.show_times_per_pixel else 1
sheet.horz_split_pos = row
sheet.horz_split_first_visible = row
module_colors = sheet_conf.get('module_colors', {})
module_styles = {module: xlwt.easyxf('pattern: pattern solid, fore_color {}'.format(color))
for module, color in module_colors.iteritems()}
for module, tests in sorted(collector.tests.iteritems()):
for ((test, param), configs) in sorted(tests.iteritems()):
sheet.write(row, 0, module, module_styles.get(module, xlwt.Style.default_style))
sheet.write(row, 1, test)
param_list = param[1:-1].split(', ') if param.startswith('(') and param.endswith(')') else [param]
image_size = next(ifilter(re_image_size.match, param_list), None)
if image_size is not None:
(image_width, image_height) = map(int, image_size.split('x', 1))
sheet.write(row, 2, image_width)
sheet.write(row, 3, image_height)
del param_list[param_list.index(image_size)]
data_type = next(ifilter(re_data_type.match, param_list), None)
if data_type is not None:
sheet.write(row, 4, data_type)
del param_list[param_list.index(data_type)]
sheet.row(row).write(5, ' | '.join(param_list))
col = 6
for c in config_names:
if c in configs:
sheet.write(row, col, configs[c], style_dict.get(configs[c], time_style))
else:
sheet.write(row, col, None, no_time_style)
col += 1
if args.show_times_per_pixel:
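                        # the total time is in ms; ms * 10^6 / (width * height) gives ns per pixel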
sheet.write(row, col,
xlwt.Formula('{0} * 1000000 / ({1} * {2})'.format(
xlwt.Utils.rowcol_to_cell(row, col - 1),
xlwt.Utils.rowcol_to_cell(row, 2),
xlwt.Utils.rowcol_to_cell(row, 3)
)),
time_style
)
col += 1
col += 1 # blank column
for comp in sheet_comparisons:
cmp_from = configs.get(comp["from"])
cmp_to = configs.get(comp["to"])
if isinstance(cmp_from, numbers.Number) and isinstance(cmp_to, numbers.Number):
try:
speedup = cmp_from / cmp_to
sheet.write(row, col, speedup, good_speedup_style if speedup > 1.1 else
bad_speedup_style if speedup < 0.9 else
speedup_style)
except ArithmeticError as e:
sheet.write(row, col, None, error_speedup_style)
else:
sheet.write(row, col, None, no_speedup_style)
col += 1
row += 1
if row % 1000 == 0: sheet.flush_row_data()
wb.save(args.output)
if __name__ == '__main__':
main()

@@ -0,0 +1,321 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/ts/cuda_perf.hpp"
#include "opencv2/core/cuda.hpp"
using namespace cv;
using namespace std;
namespace perf
{
Mat readImage(const string& fileName, int flags)
{
return imread(perf::TestBase::getDataPath(fileName), flags);
}
void PrintTo(const CvtColorInfo& info, std::ostream* os)
{
static const char* str[] =
{
"BGR2BGRA",
"BGRA2BGR",
"BGR2RGBA",
"RGBA2BGR",
"BGR2RGB",
"BGRA2RGBA",
"BGR2GRAY",
"RGB2GRAY",
"GRAY2BGR",
"GRAY2BGRA",
"BGRA2GRAY",
"RGBA2GRAY",
"BGR2BGR565",
"RGB2BGR565",
"BGR5652BGR",
"BGR5652RGB",
"BGRA2BGR565",
"RGBA2BGR565",
"BGR5652BGRA",
"BGR5652RGBA",
"GRAY2BGR565",
"BGR5652GRAY",
"BGR2BGR555",
"RGB2BGR555",
"BGR5552BGR",
"BGR5552RGB",
"BGRA2BGR555",
"RGBA2BGR555",
"BGR5552BGRA",
"BGR5552RGBA",
"GRAY2BGR555",
"BGR5552GRAY",
"BGR2XYZ",
"RGB2XYZ",
"XYZ2BGR",
"XYZ2RGB",
"BGR2YCrCb",
"RGB2YCrCb",
"YCrCb2BGR",
"YCrCb2RGB",
"BGR2HSV",
"RGB2HSV",
"",
"",
"BGR2Lab",
"RGB2Lab",
"BayerBG2BGR",
"BayerGB2BGR",
"BayerRG2BGR",
"BayerGR2BGR",
"BGR2Luv",
"RGB2Luv",
"BGR2HLS",
"RGB2HLS",
"HSV2BGR",
"HSV2RGB",
"Lab2BGR",
"Lab2RGB",
"Luv2BGR",
"Luv2RGB",
"HLS2BGR",
"HLS2RGB",
"BayerBG2BGR_VNG",
"BayerGB2BGR_VNG",
"BayerRG2BGR_VNG",
"BayerGR2BGR_VNG",
"BGR2HSV_FULL",
"RGB2HSV_FULL",
"BGR2HLS_FULL",
"RGB2HLS_FULL",
"HSV2BGR_FULL",
"HSV2RGB_FULL",
"HLS2BGR_FULL",
"HLS2RGB_FULL",
"LBGR2Lab",
"LRGB2Lab",
"LBGR2Luv",
"LRGB2Luv",
"Lab2LBGR",
"Lab2LRGB",
"Luv2LBGR",
"Luv2LRGB",
"BGR2YUV",
"RGB2YUV",
"YUV2BGR",
"YUV2RGB",
"BayerBG2GRAY",
"BayerGB2GRAY",
"BayerRG2GRAY",
"BayerGR2GRAY",
//YUV 4:2:0 formats family
"YUV2RGB_NV12",
"YUV2BGR_NV12",
"YUV2RGB_NV21",
"YUV2BGR_NV21",
"YUV2RGBA_NV12",
"YUV2BGRA_NV12",
"YUV2RGBA_NV21",
"YUV2BGRA_NV21",
"YUV2RGB_YV12",
"YUV2BGR_YV12",
"YUV2RGB_IYUV",
"YUV2BGR_IYUV",
"YUV2RGBA_YV12",
"YUV2BGRA_YV12",
"YUV2RGBA_IYUV",
"YUV2BGRA_IYUV",
"YUV2GRAY_420",
//YUV 4:2:2 formats family
"YUV2RGB_UYVY",
"YUV2BGR_UYVY",
"YUV2RGB_VYUY",
"YUV2BGR_VYUY",
"YUV2RGBA_UYVY",
"YUV2BGRA_UYVY",
"YUV2RGBA_VYUY",
"YUV2BGRA_VYUY",
"YUV2RGB_YUY2",
"YUV2BGR_YUY2",
"YUV2RGB_YVYU",
"YUV2BGR_YVYU",
"YUV2RGBA_YUY2",
"YUV2BGRA_YUY2",
"YUV2RGBA_YVYU",
"YUV2BGRA_YVYU",
"YUV2GRAY_UYVY",
"YUV2GRAY_YUY2",
// alpha premultiplication
"RGBA2mRGBA",
"mRGBA2RGBA",
"COLORCVT_MAX"
};
*os << str[info.code];
}
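    // Note: the table above mirrors the numeric order of the cv::COLOR_* codes,
    // since it is indexed directly by info.code.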
static void printOsInfo()
{
#if defined _WIN32
# if defined _WIN64
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined __ANDROID__
# if defined _LP64 || defined __LP64__
printf("[----------]\n[ GPU INFO ] \tRun on OS Android x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Android x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined __APPLE__
# if defined _LP64 || defined __LP64__
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined linux
# if defined _LP64 || defined __LP64__
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x32.\n[----------]\n"), fflush(stdout);
# endif
#endif
}
void printCudaInfo()
{
printOsInfo();
for (int i = 0; i < cv::cuda::getCudaEnabledDeviceCount(); i++)
cv::cuda::printCudaDeviceInfo(i);
}
struct KeypointIdxCompare
{
std::vector<cv::KeyPoint>* keypoints;
explicit KeypointIdxCompare(std::vector<cv::KeyPoint>* _keypoints) : keypoints(_keypoints) {}
bool operator ()(size_t i1, size_t i2) const
{
cv::KeyPoint kp1 = (*keypoints)[i1];
cv::KeyPoint kp2 = (*keypoints)[i2];
if (kp1.pt.x != kp2.pt.x)
return kp1.pt.x < kp2.pt.x;
if (kp1.pt.y != kp2.pt.y)
return kp1.pt.y < kp2.pt.y;
if (kp1.response != kp2.response)
return kp1.response < kp2.response;
return kp1.octave < kp2.octave;
}
};
void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors)
{
        std::vector<size_t> indices(keypoints.size());
        for (size_t i = 0; i < indices.size(); ++i)
            indices[i] = i;
        std::sort(indices.begin(), indices.end(), KeypointIdxCompare(&keypoints));
std::vector<cv::KeyPoint> new_keypoints;
cv::Mat new_descriptors;
new_keypoints.resize(keypoints.size());
cv::Mat descriptors;
if (_descriptors.needed())
{
descriptors = _descriptors.getMat();
new_descriptors.create(descriptors.size(), descriptors.type());
}
        for (size_t i = 0; i < indices.size(); ++i)
        {
            size_t new_idx = indices[i];
new_keypoints[i] = keypoints[new_idx];
if (!new_descriptors.empty())
descriptors.row((int) new_idx).copyTo(new_descriptors.row((int) i));
}
keypoints.swap(new_keypoints);
if (_descriptors.needed())
new_descriptors.copyTo(_descriptors);
}
}

@@ -0,0 +1,560 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/ts/cuda_test.hpp"
#include <stdexcept>
using namespace cv;
using namespace cv::cuda;
using namespace cvtest;
using namespace testing;
using namespace testing::internal;
namespace perf
{
void printCudaInfo();
}
namespace cvtest
{
//////////////////////////////////////////////////////////////////////
// random generators
int randomInt(int minVal, int maxVal)
{
RNG& rng = TS::ptr()->get_rng();
return rng.uniform(minVal, maxVal);
}
double randomDouble(double minVal, double maxVal)
{
RNG& rng = TS::ptr()->get_rng();
return rng.uniform(minVal, maxVal);
}
Size randomSize(int minVal, int maxVal)
{
return Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
}
Scalar randomScalar(double minVal, double maxVal)
{
return Scalar(randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal));
}
Mat randomMat(Size size, int type, double minVal, double maxVal)
{
return randomMat(TS::ptr()->get_rng(), size, type, minVal, maxVal, false);
}
//////////////////////////////////////////////////////////////////////
// GpuMat create
GpuMat createMat(Size size, int type, bool useRoi)
{
Size size0 = size;
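        // When useRoi is set, allocate extra padding and return a centered
        // sub-rectangle, so tests also exercise non-continuous GpuMat layouts.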
if (useRoi)
{
size0.width += randomInt(5, 15);
size0.height += randomInt(5, 15);
}
GpuMat d_m(size0, type);
if (size0 != size)
d_m = d_m(Rect((size0.width - size.width) / 2, (size0.height - size.height) / 2, size.width, size.height));
return d_m;
}
GpuMat loadMat(const Mat& m, bool useRoi)
{
GpuMat d_m = createMat(m.size(), m.type(), useRoi);
d_m.upload(m);
return d_m;
}
//////////////////////////////////////////////////////////////////////
// Image load
Mat readImage(const std::string& fileName, int flags)
{
return imread(TS::ptr()->get_data_path() + fileName, flags);
}
Mat readImageType(const std::string& fname, int type)
{
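        // Load as grayscale or color depending on the requested channel count,
        // expand to BGRA when four channels are requested, and rescale to [0, 1]
        // for floating-point depths.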
Mat src = readImage(fname, CV_MAT_CN(type) == 1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);
if (CV_MAT_CN(type) == 4)
{
Mat temp;
cvtColor(src, temp, COLOR_BGR2BGRA);
swap(src, temp);
}
src.convertTo(src, CV_MAT_DEPTH(type), CV_MAT_DEPTH(type) == CV_32F ? 1.0 / 255.0 : 1.0);
return src;
}
//////////////////////////////////////////////////////////////////////
// Gpu devices
bool supportFeature(const DeviceInfo& info, FeatureSet feature)
{
return TargetArchs::builtWith(feature) && info.supports(feature);
}
DeviceManager& DeviceManager::instance()
{
static DeviceManager obj;
return obj;
}
void DeviceManager::load(int i)
{
devices_.clear();
devices_.reserve(1);
std::ostringstream msg;
if (i < 0 || i >= getCudaEnabledDeviceCount())
{
msg << "Incorrect device number - " << i;
throw std::runtime_error(msg.str());
}
DeviceInfo info(i);
if (!info.isCompatible())
{
msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current CUDA module build";
throw std::runtime_error(msg.str());
}
devices_.push_back(info);
}
void DeviceManager::loadAll()
{
int deviceCount = getCudaEnabledDeviceCount();
devices_.clear();
devices_.reserve(deviceCount);
for (int i = 0; i < deviceCount; ++i)
{
DeviceInfo info(i);
if (info.isCompatible())
{
devices_.push_back(info);
}
}
}
void parseCudaDeviceOptions(int argc, char **argv)
{
cv::CommandLineParser cmd(argc, argv,
"{ cuda_device | -1 | CUDA device on which tests will be executed (-1 means all devices) }"
"{ h help | false | Print help info }"
);
if (cmd.has("help"))
{
std::cout << "\nAvailable options besides google test option: \n";
cmd.printMessage();
}
int device = cmd.get<int>("cuda_device");
if (device < 0)
{
cvtest::DeviceManager::instance().loadAll();
std::cout << "Run tests on all supported CUDA devices \n" << std::endl;
}
else
{
cvtest::DeviceManager::instance().load(device);
cv::cuda::DeviceInfo info(device);
std::cout << "Run tests on CUDA device " << device << " [" << info.name() << "] \n" << std::endl;
}
}
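// Illustrative invocation (added note; the binary name is a placeholder) combining
// the option parsed above with regular Google Test flags:
//   ./opencv_test_cudev --cuda_device=0 --gtest_filter=*GpuMat*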
//////////////////////////////////////////////////////////////////////
// Additional assertion
namespace
{
template <typename T, typename OutT> std::string printMatValImpl(const Mat& m, Point p)
{
const int cn = m.channels();
std::ostringstream ostr;
ostr << "(";
p.x /= cn; // the point comes from a reshaped single-channel view; map it back to a pixel column
ostr << static_cast<OutT>(m.at<T>(p.y, p.x * cn));
for (int c = 1; c < m.channels(); ++c)
{
ostr << ", " << static_cast<OutT>(m.at<T>(p.y, p.x * cn + c));
}
ostr << ")";
return ostr.str();
}
std::string printMatVal(const Mat& m, Point p)
{
typedef std::string (*func_t)(const Mat& m, Point p);
static const func_t funcs[] =
{
printMatValImpl<uchar, int>, printMatValImpl<schar, int>, printMatValImpl<ushort, int>, printMatValImpl<short, int>,
printMatValImpl<int, int>, printMatValImpl<float, float>, printMatValImpl<double, double>
};
return funcs[m.depth()](m, p);
}
}
void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minLoc_, Point* maxLoc_, const Mat& mask)
{
if (src.depth() != CV_8S)
{
minMaxLoc(src, minVal_, maxVal_, minLoc_, maxLoc_, mask);
return;
}
// OpenCV's minMaxLoc doesn't support CV_8S type
double minVal = std::numeric_limits<double>::max();
Point minLoc(-1, -1);
double maxVal = -std::numeric_limits<double>::max();
Point maxLoc(-1, -1);
for (int y = 0; y < src.rows; ++y)
{
const schar* src_row = src.ptr<schar>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<uchar>(y);
for (int x = 0; x < src.cols; ++x)
{
if (!mask_row || mask_row[x])
{
schar val = src_row[x];
if (val < minVal)
{
minVal = val;
minLoc = cv::Point(x, y);
}
if (val > maxVal)
{
maxVal = val;
maxLoc = cv::Point(x, y);
}
}
}
}
if (minVal_) *minVal_ = minVal;
if (maxVal_) *maxVal_ = maxVal;
if (minLoc_) *minLoc_ = minLoc;
if (maxLoc_) *maxLoc_ = maxLoc;
}
Mat getMat(InputArray arr)
{
if (arr.kind() == _InputArray::CUDA_GPU_MAT)
{
Mat m;
arr.getGpuMat().download(m);
return m;
}
return arr.getMat();
}
AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, InputArray m1_, InputArray m2_, double eps)
{
Mat m1 = getMat(m1_);
Mat m2 = getMat(m2_);
if (m1.size() != m2.size())
{
std::stringstream msg;
msg << "Matrices \"" << expr1 << "\" and \"" << expr2 << "\" have different sizes : \""
<< expr1 << "\" [" << PrintToString(m1.size()) << "] vs \""
<< expr2 << "\" [" << PrintToString(m2.size()) << "]";
return AssertionFailure() << msg.str();
}
if (m1.type() != m2.type())
{
std::stringstream msg;
msg << "Matrices \"" << expr1 << "\" and \"" << expr2 << "\" have different types : \""
<< expr1 << "\" [" << PrintToString(MatType(m1.type())) << "] vs \""
<< expr2 << "\" [" << PrintToString(MatType(m2.type())) << "]";
return AssertionFailure() << msg.str();
}
Mat diff;
absdiff(m1.reshape(1), m2.reshape(1), diff);
double maxVal = 0.0;
Point maxLoc;
minMaxLocGold(diff, 0, &maxVal, 0, &maxLoc);
if (maxVal > eps)
{
std::stringstream msg;
msg << "The max difference between matrices \"" << expr1 << "\" and \"" << expr2
<< "\" is " << maxVal << " at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ")"
<< ", which exceeds \"" << eps_expr << "\", where \""
<< expr1 << "\" at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ") evaluates to " << printMatVal(m1, maxLoc) << ", \""
<< expr2 << "\" at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ") evaluates to " << printMatVal(m2, maxLoc) << ", \""
<< eps_expr << "\" evaluates to " << eps;
return AssertionFailure() << msg.str();
}
return AssertionSuccess();
}
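// Illustrative usage sketch (added note; matrix names are examples): assertMatNear
// has Google Test's 3-value predicate-format signature, so a test could invoke it as
//   EXPECT_PRED_FORMAT3(cvtest::assertMatNear, gold_mat, actual_mat, 1e-5);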
double checkSimilarity(InputArray m1, InputArray m2)
{
Mat diff;
matchTemplate(getMat(m1), getMat(m2), diff, TM_CCORR_NORMED);
return std::abs(diff.at<float>(0, 0) - 1.f);
}
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
vector<MatType> v;
v.reserve((depth_end - depth_start + 1) * (cn_end - cn_start + 1));
for (int depth = depth_start; depth <= depth_end; ++depth)
{
for (int cn = cn_start; cn <= cn_end; ++cn)
{
v.push_back(MatType(CV_MAKE_TYPE(depth, cn)));
}
}
return v;
}
const vector<MatType>& all_types()
{
static vector<MatType> v = types(CV_8U, CV_64F, 1, 4);
return v;
}
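// Illustrative usage sketch (added note): the type sweeps plug into
// value-parameterized test instantiation, e.g. restricting to single-channel
// integer types only:
//   testing::ValuesIn(cvtest::types(CV_8U, CV_32S, 1, 1))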
void PrintTo(const UseRoi& useRoi, std::ostream* os)
{
if (useRoi)
(*os) << "sub matrix";
else
(*os) << "whole matrix";
}
void PrintTo(const Inverse& inverse, std::ostream* os)
{
if (inverse)
(*os) << "inverse";
else
(*os) << "direct";
}
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const Mat& image)
{
imwrite(TS::ptr()->get_data_path() + fileName, image);
}
void showDiff(InputArray gold_, InputArray actual_, double eps)
{
Mat gold = getMat(gold_);
Mat actual = getMat(actual_);
Mat diff;
absdiff(gold, actual, diff);
threshold(diff, diff, eps, 255.0, cv::THRESH_BINARY);
namedWindow("gold", WINDOW_NORMAL);
namedWindow("actual", WINDOW_NORMAL);
namedWindow("diff", WINDOW_NORMAL);
imshow("gold", gold);
imshow("actual", actual);
imshow("diff", diff);
waitKey();
}
namespace
{
bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
{
const double maxPtDif = 1.0;
const double maxSizeDif = 1.0;
const double maxAngleDif = 2.0;
const double maxResponseDif = 0.1;
double dist = cv::norm(p1.pt - p2.pt);
if (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
abs(p1.angle - p2.angle) < maxAngleDif &&
abs(p1.response - p2.response) < maxResponseDif &&
p1.octave == p2.octave &&
p1.class_id == p2.class_id)
{
return true;
}
return false;
}
struct KeyPointLess
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
return kp1.pt.y < kp2.pt.y || (kp1.pt.y == kp2.pt.y && kp1.pt.x < kp2.pt.x);
}
};
}
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
if (gold.size() != actual.size())
{
std::stringstream msg;
msg << "KeyPoints size mistmach\n"
<< "\"" << gold_expr << "\" : " << gold.size() << "\n"
<< "\"" << actual_expr << "\" : " << actual.size();
return AssertionFailure() << msg.str();
}
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (!keyPointsEquals(p1, p2))
{
std::stringstream msg;
msg << "KeyPoints differ at " << i << "\n"
<< "\"" << gold_expr << "\" vs \"" << actual_expr << "\" : \n"
<< "pt : " << testing::PrintToString(p1.pt) << " vs " << testing::PrintToString(p2.pt) << "\n"
<< "size : " << p1.size << " vs " << p2.size << "\n"
<< "angle : " << p1.angle << " vs " << p2.angle << "\n"
<< "response : " << p1.response << " vs " << p2.response << "\n"
<< "octave : " << p1.octave << " vs " << p2.octave << "\n"
<< "class_id : " << p1.class_id << " vs " << p2.class_id;
return AssertionFailure() << msg.str();
}
}
return ::testing::AssertionSuccess();
}
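// Illustrative usage sketch (added note; vector names are examples): the function
// matches the 2-value predicate-format signature, so a test could write
//   EXPECT_PRED_FORMAT2(cvtest::assertKeyPointsEquals, gold_kpts, found_kpts);
// Note that both vectors are taken by non-const reference and sorted in place.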
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
int validCount = 0;
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
{
int validCount = 0;
for (size_t i = 0; i < matches.size(); ++i)
{
const cv::DMatch& m = matches[i];
const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
void printCudaInfo()
{
perf::printCudaInfo();
}
}
void cv::cuda::PrintTo(const DeviceInfo& info, std::ostream* os)
{
(*os) << info.name();
if (info.deviceID())
(*os) << " [ID: " << info.deviceID() << "]";
}

83
modules/ts/src/ocl_perf.cpp Normal file
View File

@@ -0,0 +1,83 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
namespace cvtest {
namespace ocl {
namespace perf {
void checkDeviceMaxMemoryAllocSize(const Size& size, int type, int factor)
{
assert(factor > 0);
if (!cv::ocl::useOpenCL())
return;
size_t memSize = size.area() * CV_ELEM_SIZE(type);
const cv::ocl::Device& dev = cv::ocl::Device::getDefault();
if (memSize * factor >= dev.maxMemAllocSize())
throw ::perf::TestBase::PerfSkipTestException();
}
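// Illustrative usage sketch (added note; values are examples): call this at the
// start of an OpenCL perf test so oversized workloads are skipped instead of
// failing allocation:
//   checkDeviceMaxMemoryAllocSize(cv::Size(4096, 4096), CV_32FC4, 2);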
void randu(InputOutputArray dst)
{
if (dst.depth() == CV_8U)
cv::randu(dst, 0, 256);
else if (dst.depth() == CV_8S)
cv::randu(dst, -128, 128);
else if (dst.depth() == CV_16U)
cv::randu(dst, 0, 1024);
else if (dst.depth() == CV_32F || dst.depth() == CV_64F)
cv::randu(dst, -1.0, 1.0);
else if (dst.depth() == CV_16S || dst.depth() == CV_32S)
cv::randu(dst, -4096, 4096);
else
CV_Error(Error::StsUnsupportedFormat, "Unsupported format");
}
} // namespace perf
} } // namespace cvtest::ocl

162
modules/ts/src/ocl_test.cpp Normal file
View File

@@ -0,0 +1,162 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
namespace cvtest {
namespace ocl {
using namespace cv;
int test_loop_times = 1; // TODO Read from command line / environment
Mat TestUtils::readImage(const String &fileName, int flags)
{
return cv::imread(cvtest::TS::ptr()->get_data_path() + fileName, flags);
}
Mat TestUtils::readImageType(const String &fname, int type)
{
Mat src = readImage(fname, CV_MAT_CN(type) == 1 ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
if (CV_MAT_CN(type) == 4)
{
Mat temp;
cv::cvtColor(src, temp, cv::COLOR_BGR2BGRA);
swap(src, temp);
}
src.convertTo(src, CV_MAT_DEPTH(type));
return src;
}
double TestUtils::checkNorm1(InputArray m, InputArray mask)
{
return cvtest::norm(m.getMat(), NORM_INF, mask.getMat());
}
double TestUtils::checkNorm2(InputArray m1, InputArray m2, InputArray mask)
{
return cvtest::norm(m1.getMat(), m2.getMat(), NORM_INF, mask.getMat());
}
double TestUtils::checkSimilarity(InputArray m1, InputArray m2)
{
Mat diff;
matchTemplate(m1.getMat(), m2.getMat(), diff, TM_CCORR_NORMED); // use the C++ constant; the legacy CV_TM_* name requires the C API header
return std::abs(diff.at<float>(0, 0) - 1.f);
}
double TestUtils::checkRectSimilarity(const Size & sz, std::vector<Rect>& ob1, std::vector<Rect>& ob2)
{
double final_test_result = 0.0;
size_t sz1 = ob1.size();
size_t sz2 = ob2.size();
if (sz1 != sz2)
return sz1 > sz2 ? (double)(sz1 - sz2) : (double)(sz2 - sz1);
else
{
if (sz1 == 0 && sz2 == 0)
return 0;
cv::Mat cpu_result(sz, CV_8UC1);
cpu_result.setTo(0);
for (vector<Rect>::const_iterator r = ob1.begin(); r != ob1.end(); ++r)
{
cv::Mat cpu_result_roi(cpu_result, *r);
cpu_result_roi.setTo(1);
cpu_result.copyTo(cpu_result);
}
int cpu_area = cv::countNonZero(cpu_result > 0);
cv::Mat gpu_result(sz, CV_8UC1);
gpu_result.setTo(0);
for(vector<Rect>::const_iterator r2 = ob2.begin(); r2 != ob2.end(); ++r2)
{
cv::Mat gpu_result_roi(gpu_result, *r2);
gpu_result_roi.setTo(1);
gpu_result.copyTo(gpu_result);
}
cv::Mat result_;
multiply(cpu_result, gpu_result, result_);
int result = cv::countNonZero(result_ > 0);
if (cpu_area!=0 && result!=0)
final_test_result = 1.0 - (double)result/(double)cpu_area;
else if(cpu_area==0 && result!=0)
final_test_result = -1;
}
return final_test_result;
}
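// Illustrative interpretation (added note, derived from the code above): the return
// value is a dissimilarity score - 0.0 means the two rectangle sets cover the same
// pixels, values approaching 1.0 mean little overlap, and a plain count difference
// is returned when the vectors differ in length.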
void TestUtils::showDiff(InputArray _src, InputArray _gold, InputArray _actual, double eps, bool alwaysShow)
{
Mat src = _src.getMat(), actual = _actual.getMat(), gold = _gold.getMat();
Mat diff, diff_thresh;
absdiff(gold, actual, diff);
diff.convertTo(diff, CV_32F);
threshold(diff, diff_thresh, eps, 255.0, cv::THRESH_BINARY);
if (alwaysShow || cv::countNonZero(diff_thresh.reshape(1)) > 0)
{
#if 0
std::cout << "Source: " << std::endl << src << std::endl;
std::cout << "Expected: " << std::endl << gold << std::endl;
std::cout << "Actual: " << std::endl << actual << std::endl;
#endif
namedWindow("src", WINDOW_NORMAL);
namedWindow("gold", WINDOW_NORMAL);
namedWindow("actual", WINDOW_NORMAL);
namedWindow("diff", WINDOW_NORMAL);
imshow("src", src);
imshow("gold", gold);
imshow("actual", actual);
imshow("diff", diff);
cv::waitKey();
}
}
} } // namespace cvtest::ocl

8
modules/ts/src/precomp.hpp Normal file
View File

@@ -0,0 +1,8 @@
#include "opencv2/ts.hpp"
#include <opencv2/core/utils/logger.hpp>
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#ifdef GTEST_LINKED_AS_SHARED_LIBRARY
#error ts module should not have GTEST_LINKED_AS_SHARED_LIBRARY defined
#endif

1146
modules/ts/src/ts.cpp Normal file

File diff suppressed because it is too large Load Diff

337
modules/ts/src/ts_arraytest.cpp Normal file
View File

@@ -0,0 +1,337 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
namespace cvtest
{
static const int default_test_case_count = 500;
static const int default_max_log_array_size = 9;
ArrayTest::ArrayTest()
{
test_case_count = default_test_case_count;
iplimage_allowed = true;
cvmat_allowed = true;
optional_mask = false;
min_log_array_size = 0;
max_log_array_size = default_max_log_array_size;
element_wise_relative_error = true;
test_array.resize(MAX_ARR);
}
ArrayTest::~ArrayTest()
{
clear();
}
void ArrayTest::clear()
{
for( size_t i = 0; i < test_array.size(); i++ )
{
for( size_t j = 0; j < test_array[i].size(); j++ )
cvRelease( &test_array[i][j] );
}
BaseTest::clear();
}
int ArrayTest::read_params( const cv::FileStorage& fs )
{
int code = BaseTest::read_params( fs );
if( code < 0 )
return code;
read( find_param( fs, "min_log_array_size" ), min_log_array_size, min_log_array_size );
read( find_param( fs, "max_log_array_size" ), max_log_array_size, max_log_array_size );
read( find_param( fs, "test_case_count" ), test_case_count, test_case_count );
test_case_count = cvRound( test_case_count*ts->get_test_case_count_scale() );
min_log_array_size = clipInt( min_log_array_size, 0, 20 );
max_log_array_size = clipInt( max_log_array_size, min_log_array_size, 20 );
test_case_count = clipInt( test_case_count, 0, 100000 );
return code;
}
void ArrayTest::get_test_array_types_and_sizes( int /*test_case_idx*/, vector<vector<Size> >& sizes, vector<vector<int> >& types )
{
RNG& rng = ts->get_rng();
Size size;
double val;
size_t i, j;
val = randReal(rng) * (max_log_array_size - min_log_array_size) + min_log_array_size;
size.width = cvRound( exp(val*CV_LOG2) );
val = randReal(rng) * (max_log_array_size - min_log_array_size) + min_log_array_size;
size.height = cvRound( exp(val*CV_LOG2) );
for( i = 0; i < test_array.size(); i++ )
{
size_t sizei = test_array[i].size();
for( j = 0; j < sizei; j++ )
{
sizes[i][j] = size;
types[i][j] = CV_8UC1;
}
}
}
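// Illustrative note (added, derived from the code above): exp(val * CV_LOG2) == 2^val,
// so the generated widths and heights are log-uniformly distributed in
// [2^min_log_array_size, 2^max_log_array_size].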
static const unsigned int icvTsTypeToDepth[] =
{
IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16U, IPL_DEPTH_16S,
IPL_DEPTH_32S, IPL_DEPTH_32F, IPL_DEPTH_64F
};
int ArrayTest::prepare_test_case( int test_case_idx )
{
int code = 1;
size_t max_arr = test_array.size();
vector<vector<Size> > sizes(max_arr);
vector<vector<Size> > whole_sizes(max_arr);
vector<vector<int> > types(max_arr);
size_t i, j;
RNG& rng = ts->get_rng();
bool is_image = false;
for( i = 0; i < max_arr; i++ )
{
size_t sizei = std::max(test_array[i].size(), (size_t)1);
sizes[i].resize(sizei);
types[i].resize(sizei);
whole_sizes[i].resize(sizei);
}
get_test_array_types_and_sizes( test_case_idx, sizes, types );
for( i = 0; i < max_arr; i++ )
{
size_t sizei = test_array[i].size();
for( j = 0; j < sizei; j++ )
{
unsigned t = randInt(rng);
bool create_mask = true, use_roi = false;
CvSize size = cvSize(sizes[i][j]), whole_size = size;
CvRect roi = CV_STRUCT_INITIALIZER;
is_image = !cvmat_allowed ? true : iplimage_allowed ? (t & 1) != 0 : false;
create_mask = (t & 6) == 0; // ~ each of 3 tests will use mask
use_roi = (t & 8) != 0;
if( use_roi )
{
whole_size.width += randInt(rng) % 10;
whole_size.height += randInt(rng) % 10;
}
cvRelease( &test_array[i][j] );
if( size.width > 0 && size.height > 0 &&
types[i][j] >= 0 && (i != MASK || create_mask) )
{
if( use_roi )
{
roi.width = size.width;
roi.height = size.height;
if( whole_size.width > size.width )
roi.x = randInt(rng) % (whole_size.width - size.width);
if( whole_size.height > size.height )
roi.y = randInt(rng) % (whole_size.height - size.height);
}
if( is_image )
{
test_array[i][j] = cvCreateImage( whole_size,
icvTsTypeToDepth[CV_MAT_DEPTH(types[i][j])], CV_MAT_CN(types[i][j]) );
if( use_roi )
cvSetImageROI( (IplImage*)test_array[i][j], roi );
}
else
{
test_array[i][j] = cvCreateMat( whole_size.height, whole_size.width, types[i][j] );
if( use_roi )
{
CvMat submat, *mat = (CvMat*)test_array[i][j];
cvGetSubRect( test_array[i][j], &submat, roi );
submat.refcount = mat->refcount;
*mat = submat;
}
}
}
}
}
test_mat.resize(test_array.size());
for( i = 0; i < max_arr; i++ )
{
size_t sizei = test_array[i].size();
test_mat[i].resize(sizei);
for( j = 0; j < sizei; j++ )
{
CvArr* arr = test_array[i][j];
test_mat[i][j] = cv::cvarrToMat(arr);
if( !test_mat[i][j].empty() )
fill_array( test_case_idx, (int)i, (int)j, test_mat[i][j] );
}
}
return code;
}
void ArrayTest::get_minmax_bounds( int i, int /*j*/, int type, Scalar& low, Scalar& high )
{
double l, u;
int depth = CV_MAT_DEPTH(type);
if( i == MASK )
{
l = -2;
u = 2;
}
else if( depth < CV_32S )
{
l = getMinVal(type);
u = getMaxVal(type);
}
else
{
u = depth == CV_32S ? 1000000 : 1000.;
l = -u;
}
low = Scalar::all(l);
high = Scalar::all(u);
}
void ArrayTest::fill_array( int /*test_case_idx*/, int i, int j, Mat& arr )
{
if( i == REF_INPUT_OUTPUT )
cvtest::copy( test_mat[INPUT_OUTPUT][j], arr, Mat() );
else if( i == INPUT || i == INPUT_OUTPUT || i == MASK )
{
Scalar low, high;
get_minmax_bounds( i, j, arr.type(), low, high );
randUni( ts->get_rng(), arr, low, high );
}
}
double ArrayTest::get_success_error_level( int /*test_case_idx*/, int i, int j )
{
int elem_depth = CV_MAT_DEPTH(cvGetElemType(test_array[i][j]));
assert( i == OUTPUT || i == INPUT_OUTPUT );
return elem_depth < CV_32F ? 0 : elem_depth == CV_32F ? FLT_EPSILON*100: DBL_EPSILON*5000;
}
void ArrayTest::prepare_to_validation( int /*test_case_idx*/ )
{
assert(0);
}
int ArrayTest::validate_test_results( int test_case_idx )
{
static const char* arr_names[] = { "input", "input/output", "output",
"ref input/output", "ref output",
"temporary", "mask" };
size_t i, j;
prepare_to_validation( test_case_idx );
for( i = 0; i < 2; i++ )
{
int i0 = i == 0 ? OUTPUT : INPUT_OUTPUT;
int i1 = i == 0 ? REF_OUTPUT : REF_INPUT_OUTPUT;
size_t sizei = test_array[i0].size();
assert( sizei == test_array[i1].size() );
for( j = 0; j < sizei; j++ )
{
double err_level;
int code;
if( !test_array[i1][j] )
continue;
err_level = get_success_error_level( test_case_idx, i0, (int)j );
code = cmpEps2(ts, test_mat[i0][j], test_mat[i1][j], err_level, element_wise_relative_error, arr_names[i0]);
if (code == 0) continue;
for( i0 = 0; i0 < (int)test_array.size(); i0++ )
{
size_t sizei0 = test_array[i0].size();
if( i0 == REF_INPUT_OUTPUT || i0 == OUTPUT || i0 == TEMP )
continue;
for( i1 = 0; i1 < (int)sizei0; i1++ )
{
const Mat& arr = test_mat[i0][i1];
if( !arr.empty() )
{
string sizestr = vec2str(", ", &arr.size[0], arr.dims);
ts->printf( TS::LOG, "%s array %d type=%sC%d, size=(%s)\n",
arr_names[i0], i1, getTypeName(arr.depth()),
arr.channels(), sizestr.c_str() );
}
}
}
ts->set_failed_test_info( code );
return code;
}
}
return 0;
}
}
/* End of file. */

3294
modules/ts/src/ts_func.cpp Normal file

File diff suppressed because it is too large Load Diff

11438
modules/ts/src/ts_gtest.cpp Normal file

File diff suppressed because it is too large Load Diff

2241
modules/ts/src/ts_perf.cpp Normal file

File diff suppressed because it is too large Load Diff

541
modules/ts/src/ts_tags.cpp Normal file
View File

@@ -0,0 +1,541 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
#include "ts_tags.hpp"
namespace cvtest {
static bool printTestTag = false;
static std::vector<std::string> currentDirectTestTags, currentImpliedTestTags;
static std::vector<const ::testing::TestInfo*> skipped_tests;
static std::map<std::string, int>& getTestTagsSkipCounts()
{
static std::map<std::string, int> testTagsSkipCounts;
return testTagsSkipCounts;
}
static std::map<std::string, int>& getTestTagsSkipExtraCounts()
{
static std::map<std::string, int> testTagsSkipExtraCounts;
return testTagsSkipExtraCounts;
}
void testTagIncreaseSkipCount(const std::string& tag, bool isMain, bool appendSkipTests)
{
if (appendSkipTests)
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
std::map<std::string, int>& counts = isMain ? getTestTagsSkipCounts() : getTestTagsSkipExtraCounts();
std::map<std::string, int>::iterator i = counts.find(tag);
if (i == counts.end())
{
counts[tag] = 1;
}
else
{
i->second++;
}
}
static std::vector<std::string>& getTestTagsSkipList()
{
static std::vector<std::string> testSkipWithTags;
static bool initialized = false;
if (!initialized)
{
#if OPENCV_32BIT_CONFIGURATION
testSkipWithTags.push_back(CV_TEST_TAG_MEMORY_2GB);
#else
if (!cvtest::runBigDataTests)
testSkipWithTags.push_back(CV_TEST_TAG_MEMORY_6GB);
#endif
testSkipWithTags.push_back(CV_TEST_TAG_VERYLONG);
#if defined(_DEBUG)
testSkipWithTags.push_back(CV_TEST_TAG_DEBUG_VERYLONG);
#endif
initialized = true;
}
return testSkipWithTags;
}
void registerGlobalSkipTag(const std::string& skipTag)
{
if (skipTag.empty())
return; // do nothing
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t i = 0; i < skipTags.size(); ++i)
{
if (skipTag == skipTags[i])
return; // duplicate
}
skipTags.push_back(skipTag);
}
static std::vector<std::string>& getTestTagsForceList()
{
static std::vector<std::string> testTagsForceList; // local renamed to avoid shadowing the enclosing function's name
return testTagsForceList;
}
static std::vector<std::string>& getTestTagsRequiredList()
{
static std::vector<std::string> testTagsRequiredList; // local renamed to avoid shadowing the enclosing function's name
return testTagsRequiredList;
}
class TestTagsListener: public ::testing::EmptyTestEventListener
{
public:
void OnTestProgramStart(const ::testing::UnitTest& /*unit_test*/) CV_OVERRIDE
{
{
const std::vector<std::string>& tags = getTestTagsRequiredList();
std::ostringstream os, os_direct;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "'" : ", '") << tags[i] << "'";
os_direct << (i == 0 ? "" : ",") << tags[i];
}
std::string tags_str = os.str();
if (!tags.empty())
std::cout << "TEST: Run tests with tags: " << tags_str << std::endl;
::testing::Test::RecordProperty("test_tags", os_direct.str());
}
{
const std::vector<std::string>& tags = getTestTagsSkipList();
std::ostringstream os, os_direct;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "'" : ", '") << tags[i] << "'";
os_direct << (i == 0 ? "" : ",") << tags[i];
}
std::string tags_str = os.str();
if (!tags.empty())
std::cout << "TEST: Skip tests with tags: " << tags_str << std::endl;
::testing::Test::RecordProperty("test_tags_skip", os_direct.str());
}
{
const std::vector<std::string>& tags = getTestTagsForceList();
std::ostringstream os, os_direct;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "'" : ", '") << tags[i] << "'";
os_direct << (i == 0 ? "" : ",") << tags[i];
}
std::string tags_str = os.str();
if (!tags.empty())
std::cout << "TEST: Force tests with tags: " << tags_str << std::endl;
::testing::Test::RecordProperty("test_tags_force", os_direct.str());
}
}
void OnTestStart(const ::testing::TestInfo& test_info) CV_OVERRIDE
{
currentDirectTestTags.clear();
currentImpliedTestTags.clear();
const char* value_param_ = test_info.value_param();
if (value_param_)
{
std::string value_param(value_param_);
if (value_param.find("CV_64F") != std::string::npos
|| (value_param.find("64F") != std::string::npos
&& value_param.find(" 64F") != std::string::npos
&& value_param.find(",64F") != std::string::npos
&& value_param.find("(64F") != std::string::npos
)
)
applyTestTag_(CV_TEST_TAG_TYPE_64F);
if (value_param.find("1280x720") != std::string::npos)
applyTestTag_(CV_TEST_TAG_SIZE_HD);
if (value_param.find("1920x1080") != std::string::npos)
applyTestTag_(CV_TEST_TAG_SIZE_FULLHD);
if (value_param.find("3840x2160") != std::string::npos)
applyTestTag_(CV_TEST_TAG_SIZE_4K);
}
}
void OnTestEnd(const ::testing::TestInfo& /*test_info*/) CV_OVERRIDE
{
if (currentDirectTestTags.empty() && currentImpliedTestTags.empty())
{
if (printTestTag) std::cout << "[ TAGS ] No tags" << std::endl;
return;
}
std::ostringstream os;
std::ostringstream os_direct;
std::ostringstream os_implied;
{
const std::vector<std::string>& tags = currentDirectTestTags;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "" : ", ") << tags[i];
os_direct << (i == 0 ? "" : ",") << tags[i];
}
}
if (!currentImpliedTestTags.empty())
{
os << " (implied tags: ";
const std::vector<std::string>& tags = currentImpliedTestTags;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "" : ", ") << tags[i];
os_implied << (i == 0 ? "" : ",") << tags[i];
}
os << ")";
}
if (printTestTag) std::cout << "[ TAGS ] " << os.str() << std::endl;
::testing::Test::RecordProperty("tags", os_direct.str());
::testing::Test::RecordProperty("tags_implied", os_implied.str());
}
void OnTestIterationEnd(const ::testing::UnitTest& /*unit_test*/, int /*iteration*/) CV_OVERRIDE
{
if (!skipped_tests.empty())
{
std::cout << "[ SKIPSTAT ] " << skipped_tests.size() << " tests skipped" << std::endl;
const std::vector<std::string>& skipTags = getTestTagsSkipList();
const std::map<std::string, int>& counts = getTestTagsSkipCounts();
const std::map<std::string, int>& countsExtra = getTestTagsSkipExtraCounts();
std::vector<std::string> skipTags_all = skipTags;
skipTags_all.push_back("skip_bigdata");
skipTags_all.push_back("skip_other");
for (std::vector<std::string>::const_iterator i = skipTags_all.begin(); i != skipTags_all.end(); ++i)
{
int c1 = 0;
std::map<std::string, int>::const_iterator i1 = counts.find(*i);
if (i1 != counts.end()) c1 = i1->second;
int c2 = 0;
std::map<std::string, int>::const_iterator i2 = countsExtra.find(*i);
if (i2 != countsExtra.end()) c2 = i2->second;
if (c2 > 0)
{
std::cout << "[ SKIPSTAT ] TAG='" << *i << "' skip " << c1 << " tests (" << c2 << " times in extra skip list)" << std::endl;
}
else if (c1 > 0)
{
std::cout << "[ SKIPSTAT ] TAG='" << *i << "' skip " << c1 << " tests" << std::endl;
}
}
}
skipped_tests.clear();
}
void OnTestProgramEnd(const ::testing::UnitTest& /*unit_test*/) CV_OVERRIDE
{
/*if (!skipped_tests.empty())
{
for (size_t i = 0; i < skipped_tests.size(); i++)
{
const ::testing::TestInfo* test_info = skipped_tests[i];
if (!test_info) continue;
std::cout << "- " << test_info->test_case_name() << "." << test_info->name() << std::endl;
}
}*/
}
};
static bool isTestTagForced(const std::string& testTag)
{
const std::vector<std::string>& forceTags = getTestTagsForceList();
for (size_t i = 0; i < forceTags.size(); ++i)
{
const std::string& forceTag = forceTags[i];
if (testTag == forceTag
|| (testTag.size() >= forceTag.size()
&& forceTag[forceTag.size() - 1] == '*'
&& forceTag.substr(0, forceTag.size() - 1) == testTag.substr(0, forceTag.size() - 1)
)
)
{
return true;
}
}
return false;
}
static bool isTestTagSkipped(const std::string& testTag, CV_OUT std::string& skippedByTag)
{
skippedByTag.clear();
const std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t i = 0; i < skipTags.size(); ++i)
{
const std::string& skipTag = skipTags[i];
if (testTag == skipTag
|| (testTag.size() >= skipTag.size()
&& skipTag[skipTag.size() - 1] == '*'
&& skipTag.substr(0, skipTag.size() - 1) == testTag.substr(0, skipTag.size() - 1)
)
)
{
skippedByTag = skipTag;
return true;
}
}
return false;
}
void checkTestTags()
{
std::string skipTag;
const std::vector<std::string>& testTags = currentDirectTestTags;
{
const std::vector<std::string>& tags = getTestTagsRequiredList();
if (!tags.empty())
{
size_t found = 0;
for (size_t i = 0; i < tags.size(); ++i)
{
const std::string& tag = tags[i];
for (size_t j = 0; j < testTags.size(); ++j)
{
const std::string& testTag = testTags[j]; // index with the inner loop variable; indexing with 'i' would read the wrong element (or past the end)
if (testTag == tag
|| (testTag.size() >= tag.size()
&& tag[tag.size() - 1] == '*'
&& tag.substr(0, tag.size() - 1) == testTag.substr(0, tag.size() - 1)
)
)
{
found++;
break;
}
}
}
if (found != tags.size())
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw details::SkipTestExceptionBase("Test tags don't pass required tags list (--test_tag parameter)", true);
}
}
}
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (isTestTagForced(testTag))
return;
}
std::string skip_message;
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (isTestTagSkipped(testTag, skipTag))
{
testTagIncreaseSkipCount(skipTag, skip_message.empty());
if (skip_message.empty()) skip_message = "Test with tag '" + testTag + "' is skipped ('" + skipTag + "' is in skip list)";
}
}
const std::vector<std::string>& testTagsImplied = currentImpliedTestTags;
for (size_t i = 0; i < testTagsImplied.size(); ++i)
{
const std::string& testTag = testTagsImplied[i];
if (isTestTagSkipped(testTag, skipTag))
{
testTagIncreaseSkipCount(skipTag, skip_message.empty());
if (skip_message.empty()) skip_message = "Test with tag '" + testTag + "' is skipped (implied '" + skipTag + "' is in skip list)";
}
}
if (!skip_message.empty())
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw details::SkipTestExceptionBase(skip_message, true);
}
}
static bool applyTestTagImpl(const std::string& tag, bool direct = false)
{
CV_Assert(!tag.empty());
std::vector<std::string>& testTags = direct ? currentDirectTestTags : currentImpliedTestTags;
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (tag == testTag)
{
return false; // already exists, skip
}
}
testTags.push_back(tag);
// Tags implies logic
if (tag == CV_TEST_TAG_MEMORY_14GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_6GB);
if (tag == CV_TEST_TAG_MEMORY_6GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_2GB);
if (tag == CV_TEST_TAG_MEMORY_2GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_1GB);
if (tag == CV_TEST_TAG_MEMORY_1GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_512MB);
if (tag == CV_TEST_TAG_VERYLONG)
{
applyTestTagImpl(CV_TEST_TAG_DEBUG_VERYLONG);
applyTestTagImpl(CV_TEST_TAG_LONG);
}
else if (tag == CV_TEST_TAG_DEBUG_VERYLONG)
{
applyTestTagImpl(CV_TEST_TAG_DEBUG_LONG);
}
else if (tag == CV_TEST_TAG_LONG)
{
applyTestTagImpl(CV_TEST_TAG_DEBUG_LONG);
}
if (tag == CV_TEST_TAG_SIZE_4K)
applyTestTagImpl(CV_TEST_TAG_SIZE_FULLHD);
if (tag == CV_TEST_TAG_SIZE_FULLHD)
applyTestTagImpl(CV_TEST_TAG_SIZE_HD);
return true;
}
void applyTestTag(const std::string& tag)
{
if (tag.empty()) return;
if (!applyTestTagImpl(tag, true))
return;
checkTestTags();
}
void applyTestTag_(const std::string& tag)
{
if (tag.empty()) return;
if (!applyTestTagImpl(tag, true))
return;
}
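// Illustrative usage sketch (added note; the test name and tag are examples): tags
// are applied at the top of a test body, and a tag on the active skip list aborts
// the test via a skip exception:
//   TEST(MyModule, HugeInput)
//   {
//       applyTestTag(CV_TEST_TAG_MEMORY_6GB);
//       // ... heavy part of the test ...
//   }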
static std::vector<std::string> parseStringList(const std::string& s)
{
std::vector<std::string> result;
size_t start_pos = 0;
while (start_pos != std::string::npos)
{
while (start_pos < s.size() && s[start_pos] == ' ')
start_pos++;
const size_t pos_ = s.find(',', start_pos);
size_t pos = (pos_ == std::string::npos ? s.size() : pos_);
while (pos > start_pos && s[pos - 1] == ' ')
pos--;
if (pos > start_pos)
{
const std::string one_piece(s, start_pos, pos - start_pos);
result.push_back(one_piece);
}
start_pos = (pos_ == std::string::npos ? pos_ : pos_ + 1);
}
return result;
}
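// Illustrative behaviour (added worked example, derived from the code above):
//   parseStringList("  mem_2gb, verylong ,,long ")  ->  {"mem_2gb", "verylong", "long"}
// Leading/trailing spaces are trimmed and empty pieces are dropped.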
void activateTestTags(const cv::CommandLineParser& parser)
{
std::string test_tag_skip = parser.get<std::string>("test_tag_skip");
if (!test_tag_skip.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag_skip);
if (!tag_list.empty())
{
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < skipTags.size(); ++i)
{
if (tag == skipTags[i])
{
found = true;
break;
}
}
if (!found)
skipTags.push_back(tag);
}
}
}
std::string test_tag_enable = parser.get<std::string>("test_tag_enable");
if (!test_tag_enable.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag_enable);
if (!tag_list.empty())
{
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < skipTags.size(); ++i)
{
if (tag == skipTags[i])
{
skipTags.erase(skipTags.begin() + i);
found = true;
}
}
if (!found)
{
std::cerr << "Can't re-enable tag '" << tag << "' - it is not in the skip list" << std::endl;
}
}
}
}
std::string test_tag_force = parser.get<std::string>("test_tag_force");
if (!test_tag_force.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag_force);
if (!tag_list.empty())
{
std::vector<std::string>& forceTags = getTestTagsForceList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < forceTags.size(); ++i)
{
if (tag == forceTags[i])
{
found = true;
break;
}
}
if (!found)
forceTags.push_back(tag);
}
}
}
std::string test_tag = parser.get<std::string>("test_tag");
if (!test_tag.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag);
if (!tag_list.empty())
{
std::vector<std::string>& requiredTags = getTestTagsRequiredList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < requiredTags.size(); ++i)
{
if (tag == requiredTags[i])
{
found = true;
break;
}
}
if (!found)
requiredTags.push_back(tag);
}
}
}
printTestTag = parser.get<bool>("test_tag_print");
::testing::UnitTest::GetInstance()->listeners().Append(new TestTagsListener());
}
} // namespace
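// Illustrative invocations (added note; the binary name is a placeholder) for the
// options registered via CV_TEST_TAGS_PARAMS in ts_tags.hpp:
//   ./opencv_test_core --test_tag_skip=mem_2gb --test_tag_print=true
//   ./opencv_test_core --test_tag_enable=verylong --test_tag=mem_2gb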

28
modules/ts/src/ts_tags.hpp Normal file
View File

@@ -0,0 +1,28 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_TS_SRC_TAGS_HPP
#define OPENCV_TS_SRC_TAGS_HPP
// [all | test_tag] - (test_tag_skip - test_tag_enable) + test_tag_force
#define CV_TEST_TAGS_PARAMS \
"{ test_tag | |run tests with specified 'tag' markers only (comma ',' separated list) }" \
"{ test_tag_skip | |skip tests with 'tag' markers (comma ',' separated list) }" \
"{ test_tag_enable | |don't skip tests with 'tag' markers (comma ',' separated list) }" \
"{ test_tag_force | |force running of tests with 'tag' markers (comma ',' separated list) }" \
"{ test_tag_print | false |print assigned tags for each test }" \
// TODO
// "{ test_tag_file | |read test tags assignment }" \
namespace cvtest {
void activateTestTags(const cv::CommandLineParser& parser);
void testTagIncreaseSkipCount(const std::string& tag, bool isMain = true, bool appendSkipTests = false);
} // namespace
#endif // OPENCV_TS_SRC_TAGS_HPP