init - initialize the project

Author: Lee Nony
Date: 2022-05-06 01:58:53 +08:00
Commit: 90a5cc7cb6
6772 changed files with 2837787 additions and 0 deletions


@@ -0,0 +1,13 @@
set(the_description "Images stitching")
if(HAVE_CUDA)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wstrict-aliasing)
endif()
set(STITCHING_CONTRIB_DEPS "opencv_xfeatures2d")
if(BUILD_SHARED_LIBS AND BUILD_opencv_world AND OPENCV_WORLD_EXCLUDE_EXTRA_MODULES)
set(STITCHING_CONTRIB_DEPS "")
endif()
ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_flann
OPTIONAL opencv_cudaarithm opencv_cudawarping opencv_cudafeatures2d opencv_cudalegacy opencv_cudaimgproc ${STITCHING_CONTRIB_DEPS}
WRAP python)

Binary file not shown (new image, 83 KiB).


@@ -0,0 +1,357 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_STITCHER_HPP
#define OPENCV_STITCHING_STITCHER_HPP
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/stitching/warpers.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#if defined(Status)
# warning Detected X11 'Status' macro definition, it can cause build conflicts. Please, include this header before any X11 headers.
#endif
/**
@defgroup stitching Images stitching
This figure illustrates the stitching module pipeline implemented in the Stitcher class. Using that
class it's possible to configure/remove some steps, i.e. adjust the stitching pipeline according to
particular needs. All building blocks of the pipeline are available in the detail namespace;
one can combine and use them separately.
The implemented stitching pipeline is very similar to the one proposed in @cite BL07 .
![stitching pipeline](StitchingPipeline.jpg)
Camera models
-------------
There are currently 2 camera models implemented in the stitching pipeline.
- _Homography model_ expecting perspective transformations between images
implemented in @ref cv::detail::BestOf2NearestMatcher cv::detail::HomographyBasedEstimator
cv::detail::BundleAdjusterReproj cv::detail::BundleAdjusterRay
- _Affine model_ expecting affine transformation with 6 DOF or 4 DOF implemented in
@ref cv::detail::AffineBestOf2NearestMatcher cv::detail::AffineBasedEstimator
cv::detail::BundleAdjusterAffine cv::detail::BundleAdjusterAffinePartial cv::AffineWarper
The homography model is useful for creating photo panoramas captured by a camera,
while the affine-based model can be used to stitch scans and objects captured by
specialized devices. Use @ref cv::Stitcher::create to get a preconfigured pipeline for one
of those models.
@note
Certain detailed settings of @ref cv::Stitcher might not make sense. In particular,
you should not mix classes implementing the affine model with classes implementing
the homography model, as they work with different transformations.
@{
@defgroup stitching_match Features Finding and Images Matching
@defgroup stitching_rotation Rotation Estimation
@defgroup stitching_autocalib Autocalibration
@defgroup stitching_warp Images Warping
@defgroup stitching_seam Seam Estimation
@defgroup stitching_exposure Exposure Compensation
@defgroup stitching_blend Image Blenders
@}
*/
namespace cv {
//! @addtogroup stitching
//! @{
/** @example samples/cpp/stitching.cpp
A basic example on image stitching
*/
/** @example samples/python/stitching.py
A basic example on image stitching in Python.
*/
/** @example samples/cpp/stitching_detailed.cpp
A detailed example on image stitching
*/
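/* A minimal end-to-end usage sketch of the high-level API declared below; hedged, not
   authoritative: the file names are hypothetical placeholders and error handling is elided.

   @code
   std::vector<cv::Mat> imgs;
   imgs.push_back(cv::imread("left.jpg"));   // hypothetical input images
   imgs.push_back(cv::imread("right.jpg"));

   cv::Mat pano;
   cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA); // or Stitcher::SCANS
   cv::Stitcher::Status status = stitcher->stitch(imgs, pano);
   if (status == cv::Stitcher::OK)
       cv::imwrite("pano.jpg", pano);   // imread/imwrite need opencv2/imgcodecs.hpp
   @endcode
*/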
/** @brief High level image stitcher.
It's possible to use this class without being aware of the entire stitching pipeline. However, to
achieve higher stitching stability and better quality of the final images, at least basic
familiarity with the theory is recommended.
@note
- A basic example on image stitching can be found at
opencv_source_code/samples/cpp/stitching.cpp
- A basic example on image stitching in Python can be found at
opencv_source_code/samples/python/stitching.py
- A detailed example on image stitching can be found at
opencv_source_code/samples/cpp/stitching_detailed.cpp
*/
class CV_EXPORTS_W Stitcher
{
public:
/**
* When setting a resolution for stitching, this value is a placeholder
* for preserving the original resolution.
*/
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900/*MSVS 2015*/)
static constexpr double ORIG_RESOL = -1.0;
#else
// support MSVS 2013
static const double ORIG_RESOL; // Initialized in stitcher.cpp
#endif
enum Status
{
OK = 0,
ERR_NEED_MORE_IMGS = 1,
ERR_HOMOGRAPHY_EST_FAIL = 2,
ERR_CAMERA_PARAMS_ADJUST_FAIL = 3
};
enum Mode
{
/** Mode for creating photo panoramas. Expects images under perspective
transformation and projects the resulting pano to a sphere.
@sa detail::BestOf2NearestMatcher SphericalWarper
*/
PANORAMA = 0,
/** Mode for composing scans. Expects images under affine transformation; does
not compensate exposure by default.
@sa detail::AffineBestOf2NearestMatcher AffineWarper
*/
SCANS = 1,
};
/** @brief Creates a Stitcher configured in one of the stitching modes.
@param mode Scenario for stitcher operation. This is usually determined by the source of the
images to stitch and their transformation. Default parameters will be chosen for operation in the
given scenario.
@return Stitcher class instance.
*/
CV_WRAP static Ptr<Stitcher> create(Mode mode = Stitcher::PANORAMA);
CV_WRAP double registrationResol() const { return registr_resol_; }
CV_WRAP void setRegistrationResol(double resol_mpx) { registr_resol_ = resol_mpx; }
CV_WRAP double seamEstimationResol() const { return seam_est_resol_; }
CV_WRAP void setSeamEstimationResol(double resol_mpx) { seam_est_resol_ = resol_mpx; }
CV_WRAP double compositingResol() const { return compose_resol_; }
CV_WRAP void setCompositingResol(double resol_mpx) { compose_resol_ = resol_mpx; }
CV_WRAP double panoConfidenceThresh() const { return conf_thresh_; }
CV_WRAP void setPanoConfidenceThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
CV_WRAP bool waveCorrection() const { return do_wave_correct_; }
CV_WRAP void setWaveCorrection(bool flag) { do_wave_correct_ = flag; }
CV_WRAP InterpolationFlags interpolationFlags() const { return interp_flags_; }
CV_WRAP void setInterpolationFlags(InterpolationFlags interp_flags) { interp_flags_ = interp_flags; }
detail::WaveCorrectKind waveCorrectKind() const { return wave_correct_kind_; }
void setWaveCorrectKind(detail::WaveCorrectKind kind) { wave_correct_kind_ = kind; }
Ptr<Feature2D> featuresFinder() { return features_finder_; }
const Ptr<Feature2D> featuresFinder() const { return features_finder_; }
void setFeaturesFinder(Ptr<Feature2D> features_finder)
{ features_finder_ = features_finder; }
Ptr<detail::FeaturesMatcher> featuresMatcher() { return features_matcher_; }
const Ptr<detail::FeaturesMatcher> featuresMatcher() const { return features_matcher_; }
void setFeaturesMatcher(Ptr<detail::FeaturesMatcher> features_matcher)
{ features_matcher_ = features_matcher; }
const cv::UMat& matchingMask() const { return matching_mask_; }
void setMatchingMask(const cv::UMat &mask)
{
CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows);
matching_mask_ = mask.clone();
}
Ptr<detail::BundleAdjusterBase> bundleAdjuster() { return bundle_adjuster_; }
const Ptr<detail::BundleAdjusterBase> bundleAdjuster() const { return bundle_adjuster_; }
void setBundleAdjuster(Ptr<detail::BundleAdjusterBase> bundle_adjuster)
{ bundle_adjuster_ = bundle_adjuster; }
Ptr<detail::Estimator> estimator() { return estimator_; }
const Ptr<detail::Estimator> estimator() const { return estimator_; }
void setEstimator(Ptr<detail::Estimator> estimator)
{ estimator_ = estimator; }
Ptr<WarperCreator> warper() { return warper_; }
const Ptr<WarperCreator> warper() const { return warper_; }
void setWarper(Ptr<WarperCreator> creator) { warper_ = creator; }
Ptr<detail::ExposureCompensator> exposureCompensator() { return exposure_comp_; }
const Ptr<detail::ExposureCompensator> exposureCompensator() const { return exposure_comp_; }
void setExposureCompensator(Ptr<detail::ExposureCompensator> exposure_comp)
{ exposure_comp_ = exposure_comp; }
Ptr<detail::SeamFinder> seamFinder() { return seam_finder_; }
const Ptr<detail::SeamFinder> seamFinder() const { return seam_finder_; }
void setSeamFinder(Ptr<detail::SeamFinder> seam_finder) { seam_finder_ = seam_finder; }
Ptr<detail::Blender> blender() { return blender_; }
const Ptr<detail::Blender> blender() const { return blender_; }
void setBlender(Ptr<detail::Blender> b) { blender_ = b; }
/** @brief These functions try to match the given images and to estimate rotations of each camera.
@note Use the functions only if you're aware of the stitching pipeline, otherwise use
Stitcher::stitch.
@param images Input images.
@param masks Masks for each input image specifying where to look for keypoints (optional).
@return Status code.
*/
CV_WRAP Status estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks = noArray());
/** @brief These functions restore the camera rotation and intrinsics of each camera,
* which can be retrieved with a @ref Stitcher::cameras call
@param images Input images.
@param cameras Estimated rotation of cameras for each of the input images.
@param component Indices (0-based) of images constituting the final panorama (optional).
@return Status code.
*/
Status setTransform(InputArrayOfArrays images,
const std::vector<detail::CameraParams> &cameras,
const std::vector<int> &component);
/** @overload */
Status setTransform(InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras);
/** @overload */
CV_WRAP Status composePanorama(OutputArray pano);
/** @brief These functions try to compose the given images (or images stored internally from the other function
calls) into the final pano under the assumption that the image transformations were estimated
before.
@note Use the functions only if you're aware of the stitching pipeline, otherwise use
Stitcher::stitch.
@param images Input images.
@param pano Final pano.
@return Status code.
*/
CV_WRAP Status composePanorama(InputArrayOfArrays images, OutputArray pano);
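/* A sketch of the two-phase use of the pipeline-aware API (estimateTransform followed by
   composePanorama); `imgs` is assumed to already hold the input images:

   @code
   cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
   cv::Stitcher::Status status = stitcher->estimateTransform(imgs);
   if (status == cv::Stitcher::OK)
   {
       cv::Mat pano;
       status = stitcher->composePanorama(pano); // reuses the images passed to estimateTransform
   }
   @endcode
*/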
/** @overload */
CV_WRAP Status stitch(InputArrayOfArrays images, OutputArray pano);
/** @brief These functions try to stitch the given images.
@param images Input images.
@param masks Masks for each input image specifying where to look for keypoints (optional).
@param pano Final pano.
@return Status code.
*/
CV_WRAP Status stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano);
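/* A sketch of stitching with per-image keypoint masks (8U masks, non-zero where keypoints may
   be looked for); `stitcher`, `imgs` and `masks` are assumed prepared as in the sketches above:

   @code
   cv::Mat pano;
   cv::Stitcher::Status status = stitcher->stitch(imgs, masks, pano);
   @endcode
*/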
std::vector<int> component() const { return indices_; }
std::vector<detail::CameraParams> cameras() const { return cameras_; }
CV_WRAP double workScale() const { return work_scale_; }
UMat resultMask() const { return result_mask_; }
private:
Status matchImages();
Status estimateCameraParams();
double registr_resol_;
double seam_est_resol_;
double compose_resol_;
double conf_thresh_;
InterpolationFlags interp_flags_;
Ptr<Feature2D> features_finder_;
Ptr<detail::FeaturesMatcher> features_matcher_;
cv::UMat matching_mask_;
Ptr<detail::BundleAdjusterBase> bundle_adjuster_;
Ptr<detail::Estimator> estimator_;
bool do_wave_correct_;
detail::WaveCorrectKind wave_correct_kind_;
Ptr<WarperCreator> warper_;
Ptr<detail::ExposureCompensator> exposure_comp_;
Ptr<detail::SeamFinder> seam_finder_;
Ptr<detail::Blender> blender_;
std::vector<cv::UMat> imgs_;
std::vector<cv::UMat> masks_;
std::vector<cv::Size> full_img_sizes_;
std::vector<detail::ImageFeatures> features_;
std::vector<detail::MatchesInfo> pairwise_matches_;
std::vector<cv::UMat> seam_est_imgs_;
std::vector<int> indices_;
std::vector<detail::CameraParams> cameras_;
UMat result_mask_;
double work_scale_;
double seam_scale_;
double seam_work_aspect_;
double warped_image_scale_;
};
/**
* @deprecated use Stitcher::create
*/
CV_DEPRECATED Ptr<Stitcher> createStitcher(bool try_use_gpu = false);
/**
* @deprecated use Stitcher::create
*/
CV_DEPRECATED Ptr<Stitcher> createStitcherScans(bool try_use_gpu = false);
//! @} stitching
} // namespace cv
#endif // OPENCV_STITCHING_STITCHER_HPP


@@ -0,0 +1,86 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_AUTOCALIB_HPP
#define OPENCV_STITCHING_AUTOCALIB_HPP
#include "opencv2/core.hpp"
#include "matchers.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_autocalib
//! @{
/** @brief Tries to estimate focal lengths from the given homography under the assumption that the camera
undergoes rotations around its centre only.
@param H Homography.
@param f0 Estimated focal length along X axis.
@param f1 Estimated focal length along Y axis.
@param f0_ok True, if f0 was estimated successfully, false otherwise.
@param f1_ok True, if f1 was estimated successfully, false otherwise.
See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
*/
void CV_EXPORTS_W focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
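/* A sketch of focal estimation from a single homography; `H` is assumed to be a 3x3 CV_64F
   homography relating two images taken by a purely rotating camera (e.g. from cv::findHomography).
   Taking the geometric mean of the two estimates is a common convention, not mandated here.

   @code
   double f0, f1;
   bool f0_ok, f1_ok;
   cv::detail::focalsFromHomography(H, f0, f1, f0_ok, f1_ok);
   double focal = (f0_ok && f1_ok) ? std::sqrt(f0 * f1) : 0.0; // 0.0 signals failure here
   @endcode
*/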
/** @brief Estimates focal lengths for each given camera.
@param features Features of images.
@param pairwise_matches Matches between all image pairs.
@param focals Estimated focal lengths for each camera.
*/
void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<double> &focals);
bool CV_EXPORTS_W calibrateRotatingCamera(const std::vector<Mat> &Hs, CV_OUT Mat &K);
//! @} stitching_autocalib
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_AUTOCALIB_HPP


@@ -0,0 +1,184 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_BLENDERS_HPP
#define OPENCV_STITCHING_BLENDERS_HPP
#if defined(NO)
# warning Detected Apple 'NO' macro definition, it can cause build conflicts. Please, include this header before any Apple headers.
#endif
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_blend
//! @{
/** @brief Base class for all blenders.
Simple blender which puts one image over another
*/
class CV_EXPORTS_W Blender
{
public:
virtual ~Blender() {}
enum { NO, FEATHER, MULTI_BAND };
CV_WRAP static Ptr<Blender> createDefault(int type, bool try_gpu = false);
/** @brief Prepares the blender for blending.
@param corners Source images top-left corners
@param sizes Source image sizes
*/
CV_WRAP virtual void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
/** @overload */
CV_WRAP virtual void prepare(Rect dst_roi);
/** @brief Processes the image.
@param img Source image
@param mask Source image mask
@param tl Source image top-left corner
*/
CV_WRAP virtual void feed(InputArray img, InputArray mask, Point tl);
/** @brief Blends and returns the final pano.
@param dst Final pano
@param dst_mask Final pano mask
*/
CV_WRAP virtual void blend(CV_IN_OUT InputOutputArray dst, CV_IN_OUT InputOutputArray dst_mask);
protected:
UMat dst_, dst_mask_;
Rect dst_roi_;
};
/** @brief Simple blender which mixes images at their borders.
*/
class CV_EXPORTS_W FeatherBlender : public Blender
{
public:
CV_WRAP FeatherBlender(float sharpness = 0.02f);
CV_WRAP float sharpness() const { return sharpness_; }
CV_WRAP void setSharpness(float val) { sharpness_ = val; }
CV_WRAP void prepare(Rect dst_roi) CV_OVERRIDE;
CV_WRAP void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
CV_WRAP void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE;
//! Creates weight maps for a fixed set of source images by their masks and top-left corners.
//! The final image can be obtained by simple weighting of the source images.
CV_WRAP Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &weight_maps);
private:
float sharpness_;
UMat weight_map_;
UMat dst_weight_map_;
};
inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }
/** @brief Blender which uses multi-band blending algorithm (see @cite BA83).
*/
class CV_EXPORTS_W MultiBandBlender : public Blender
{
public:
CV_WRAP MultiBandBlender(int try_gpu = false, int num_bands = 5, int weight_type = CV_32F);
CV_WRAP int numBands() const { return actual_num_bands_; }
CV_WRAP void setNumBands(int val) { actual_num_bands_ = val; }
CV_WRAP void prepare(Rect dst_roi) CV_OVERRIDE;
CV_WRAP void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE;
CV_WRAP void blend(CV_IN_OUT InputOutputArray dst, CV_IN_OUT InputOutputArray dst_mask) CV_OVERRIDE;
private:
int actual_num_bands_, num_bands_;
std::vector<UMat> dst_pyr_laplace_;
std::vector<UMat> dst_band_weights_;
Rect dst_roi_final_;
bool can_use_gpu_;
int weight_type_; //CV_32F or CV_16S
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
std::vector<cuda::GpuMat> gpu_dst_pyr_laplace_;
std::vector<cuda::GpuMat> gpu_dst_band_weights_;
std::vector<Point> gpu_tl_points_;
std::vector<cuda::GpuMat> gpu_imgs_with_border_;
std::vector<std::vector<cuda::GpuMat> > gpu_weight_pyr_gauss_vec_;
std::vector<std::vector<cuda::GpuMat> > gpu_src_pyr_laplace_vec_;
std::vector<std::vector<cuda::GpuMat> > gpu_ups_;
cuda::GpuMat gpu_dst_mask_;
cuda::GpuMat gpu_mask_;
cuda::GpuMat gpu_img_;
cuda::GpuMat gpu_weight_map_;
cuda::GpuMat gpu_add_mask_;
int gpu_feed_idx_;
bool gpu_initialized_;
#endif
};
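/* A sketch of the prepare/feed/blend protocol shared by the blenders above; `corners`, `sizes`,
   `imgs_warped` and `masks_warped` are assumed to come from earlier warping steps, with images
   commonly converted to CV_16SC3 before feeding:

   @code
   cv::Ptr<cv::detail::Blender> blender =
       cv::detail::Blender::createDefault(cv::detail::Blender::MULTI_BAND);
   blender->prepare(corners, sizes);                     // allocate the destination ROI
   for (size_t i = 0; i < imgs_warped.size(); ++i)
       blender->feed(imgs_warped[i], masks_warped[i], corners[i]);
   cv::Mat pano, pano_mask;
   blender->blend(pano, pano_mask);                      // produces the final pano and its mask
   @endcode
*/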
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
void CV_EXPORTS_W normalizeUsingWeightMap(InputArray weight, CV_IN_OUT InputOutputArray src);
void CV_EXPORTS_W createWeightMap(InputArray mask, float sharpness, CV_IN_OUT InputOutputArray weight);
void CV_EXPORTS_W createLaplacePyr(InputArray img, int num_levels, CV_IN_OUT std::vector<UMat>& pyr);
void CV_EXPORTS_W createLaplacePyrGpu(InputArray img, int num_levels, CV_IN_OUT std::vector<UMat>& pyr);
// Restores source image
void CV_EXPORTS_W restoreImageFromLaplacePyr(CV_IN_OUT std::vector<UMat>& pyr);
void CV_EXPORTS_W restoreImageFromLaplacePyrGpu(CV_IN_OUT std::vector<UMat>& pyr);
//! @}
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_BLENDERS_HPP


@@ -0,0 +1,78 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_CAMERA_HPP
#define OPENCV_STITCHING_CAMERA_HPP
#include "opencv2/core.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching
//! @{
/** @brief Describes camera parameters.
@note Translation is assumed to be zero during the whole stitching pipeline.
*/
struct CV_EXPORTS_W_SIMPLE CameraParams
{
CameraParams();
CameraParams(const CameraParams& other);
CameraParams& operator =(const CameraParams& other);
CV_WRAP Mat K() const;
CV_PROP_RW double focal; // Focal length
CV_PROP_RW double aspect; // Aspect ratio
CV_PROP_RW double ppx; // Principal point X
CV_PROP_RW double ppy; // Principal point Y
CV_PROP_RW Mat R; // Rotation
CV_PROP_RW Mat t; // Translation
};
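/* For orientation, K() assembles the pinhole intrinsic matrix from the fields above; a sketch of
   the expected layout (derived from the parameter semantics, not quoted from the implementation):

   @code
   // K = [ focal   0              ppx ]
   //     [ 0       focal*aspect   ppy ]
   //     [ 0       0              1   ]
   @endcode
*/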
//! @}
} // namespace detail
} // namespace cv
#endif // #ifndef OPENCV_STITCHING_CAMERA_HPP


@@ -0,0 +1,245 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP
#define OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP
#if defined(NO)
# warning Detected Apple 'NO' macro definition, it can cause build conflicts. Please, include this header before any Apple headers.
#endif
#include "opencv2/core.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_exposure
//! @{
/** @brief Base class for all exposure compensators.
*/
class CV_EXPORTS_W ExposureCompensator
{
public:
ExposureCompensator(): updateGain(true) {}
virtual ~ExposureCompensator() {}
enum { NO, GAIN, GAIN_BLOCKS, CHANNELS, CHANNELS_BLOCKS };
CV_WRAP static Ptr<ExposureCompensator> createDefault(int type);
/**
@param corners Source image top-left corners
@param images Source images
@param masks Image masks to update (the second value in the pair specifies the value which should
be used to detect where the image is)
*/
CV_WRAP void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks);
/** @overload */
virtual void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat, uchar> > &masks) = 0;
/** @brief Compensate exposure in the specified image.
@param index Image index
@param corner Image top-left corner
@param image Image to process
@param mask Image mask
*/
CV_WRAP virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
CV_WRAP virtual void getMatGains(CV_OUT std::vector<Mat>&) { CV_Error(Error::StsInternal, ""); }
CV_WRAP virtual void setMatGains(std::vector<Mat>&) { CV_Error(Error::StsInternal, ""); }
CV_WRAP void setUpdateGain(bool b) { updateGain = b; }
CV_WRAP bool getUpdateGain() { return updateGain; }
protected:
bool updateGain;
};
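/* A sketch of compensator usage; `corners`, `images_warped` and `masks_warped` (vectors of UMat)
   are assumed to come from the warping step, matching the wrapped feed() overload above:

   @code
   cv::Ptr<cv::detail::ExposureCompensator> compensator =
       cv::detail::ExposureCompensator::createDefault(cv::detail::ExposureCompensator::GAIN);
   compensator->feed(corners, images_warped, masks_warped);      // estimate per-image gains
   for (size_t i = 0; i < images_warped.size(); ++i)
       compensator->apply((int)i, corners[i], images_warped[i], masks_warped[i]);
   @endcode
*/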
/** @brief Stub exposure compensator which does nothing.
*/
class CV_EXPORTS_W NoExposureCompensator : public ExposureCompensator
{
public:
void feed(const std::vector<Point> &/*corners*/, const std::vector<UMat> &/*images*/,
const std::vector<std::pair<UMat,uchar> > &/*masks*/) CV_OVERRIDE { }
CV_WRAP void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) CV_OVERRIDE { }
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE { umv.clear(); }
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE { umv.clear(); }
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities, see @cite BL07 and @cite WJ10 for details.
*/
class CV_EXPORTS_W GainCompensator : public ExposureCompensator
{
public:
// This Constructor only exists to make source level compatibility detector happy
CV_WRAP GainCompensator()
: GainCompensator(1) {}
CV_WRAP GainCompensator(int nr_feeds)
: nr_feeds_(nr_feeds), similarity_threshold_(1) {}
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
void singleFeed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks);
CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; }
CV_WRAP int getNrFeeds() { return nr_feeds_; }
CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; }
CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; }
void prepareSimilarityMask(const std::vector<Point> &corners, const std::vector<UMat> &images);
std::vector<double> gains() const;
private:
UMat buildSimilarityMask(InputArray src_array1, InputArray src_array2);
Mat_<double> gains_;
int nr_feeds_;
double similarity_threshold_;
std::vector<UMat> similarities_;
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities on each channel independently.
*/
class CV_EXPORTS_W ChannelsCompensator : public ExposureCompensator
{
public:
CV_WRAP ChannelsCompensator(int nr_feeds=1)
: nr_feeds_(nr_feeds), similarity_threshold_(1) {}
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; }
CV_WRAP int getNrFeeds() { return nr_feeds_; }
CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; }
CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; }
std::vector<Scalar> gains() const { return gains_; }
private:
std::vector<Scalar> gains_;
int nr_feeds_;
double similarity_threshold_;
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image blocks.
*/
class CV_EXPORTS_W BlocksCompensator : public ExposureCompensator
{
public:
BlocksCompensator(int bl_width=32, int bl_height=32, int nr_feeds=1)
: bl_width_(bl_width), bl_height_(bl_height), nr_feeds_(nr_feeds), nr_gain_filtering_iterations_(2),
similarity_threshold_(1) {}
CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE;
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE;
CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; }
CV_WRAP int getNrFeeds() { return nr_feeds_; }
CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; }
CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; }
CV_WRAP void setBlockSize(int width, int height) { bl_width_ = width; bl_height_ = height; }
CV_WRAP void setBlockSize(Size size) { setBlockSize(size.width, size.height); }
CV_WRAP Size getBlockSize() const { return Size(bl_width_, bl_height_); }
CV_WRAP void setNrGainsFilteringIterations(int nr_iterations) { nr_gain_filtering_iterations_ = nr_iterations; }
CV_WRAP int getNrGainsFilteringIterations() const { return nr_gain_filtering_iterations_; }
protected:
template<class Compensator>
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks);
private:
UMat getGainMap(const GainCompensator& compensator, int bl_idx, Size bl_per_img);
UMat getGainMap(const ChannelsCompensator& compensator, int bl_idx, Size bl_per_img);
int bl_width_, bl_height_;
std::vector<UMat> gain_maps_;
int nr_feeds_;
int nr_gain_filtering_iterations_;
double similarity_threshold_;
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block
intensities, see @cite UES01 for details.
*/
class CV_EXPORTS_W BlocksGainCompensator : public BlocksCompensator
{
public:
// This Constructor only exists to make source level compatibility detector happy
CV_WRAP BlocksGainCompensator(int bl_width = 32, int bl_height = 32)
: BlocksGainCompensator(bl_width, bl_height, 1) {}
CV_WRAP BlocksGainCompensator(int bl_width, int bl_height, int nr_feeds)
: BlocksCompensator(bl_width, bl_height, nr_feeds) {}
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
// This function only exists to make source level compatibility detector happy
CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE {
BlocksCompensator::apply(index, corner, image, mask); }
// This function only exists to make source level compatibility detector happy
CV_WRAP void getMatGains(CV_OUT std::vector<Mat>& umv) CV_OVERRIDE { BlocksCompensator::getMatGains(umv); }
// This function only exists to make source level compatibility detector happy
CV_WRAP void setMatGains(std::vector<Mat>& umv) CV_OVERRIDE { BlocksCompensator::setMatGains(umv); }
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image blocks
on each channel.
*/
class CV_EXPORTS_W BlocksChannelsCompensator : public BlocksCompensator
{
public:
CV_WRAP BlocksChannelsCompensator(int bl_width=32, int bl_height=32, int nr_feeds=1)
: BlocksCompensator(bl_width, bl_height, nr_feeds) {}
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) CV_OVERRIDE;
};
//! @}
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP


@@ -0,0 +1,253 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_MATCHERS_HPP
#define OPENCV_STITCHING_MATCHERS_HPP
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/opencv_modules.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_match
//! @{
/** @brief Structure containing image keypoints and descriptors. */
struct CV_EXPORTS_W_SIMPLE ImageFeatures
{
CV_PROP_RW int img_idx;
CV_PROP_RW Size img_size;
std::vector<KeyPoint> keypoints;
CV_PROP_RW UMat descriptors;
CV_WRAP std::vector<KeyPoint> getKeypoints() { return keypoints; };
};
/** @brief Computes features for each of the given images.
@param featuresFinder Feature finder (a cv::Feature2D detector/extractor)
@param images Source images
@param features Found features for each image
@param masks Masks for each input image specifying where to look for keypoints (optional)
*/
CV_EXPORTS_W void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArrayOfArrays images,
CV_OUT std::vector<ImageFeatures> &features,
InputArrayOfArrays masks = noArray());
/** @brief Computes features for the given image.
@param featuresFinder Feature finder
@param image Source image
@param features Found features
@param mask Mask specifying where to look for keypoints (optional)
*/
CV_EXPORTS_AS(computeImageFeatures2) void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArray image,
CV_OUT ImageFeatures &features,
InputArray mask = noArray());
/** @brief Structure containing information about matches between two images.
It's assumed that there is a transformation between those images. The transformation may be
a homography or an affine transformation, depending on the selected matcher.
@sa detail::FeaturesMatcher
*/
struct CV_EXPORTS_W_SIMPLE MatchesInfo
{
MatchesInfo();
MatchesInfo(const MatchesInfo &other);
MatchesInfo& operator =(const MatchesInfo &other);
CV_PROP_RW int src_img_idx;
CV_PROP_RW int dst_img_idx; //!< Images indices (optional)
std::vector<DMatch> matches;
std::vector<uchar> inliers_mask; //!< Geometrically consistent matches mask
CV_PROP_RW int num_inliers; //!< Number of geometrically consistent matches
CV_PROP_RW Mat H; //!< Estimated transformation
CV_PROP_RW double confidence; //!< Confidence two images are from the same panorama
CV_WRAP std::vector<DMatch> getMatches() { return matches; };
CV_WRAP std::vector<uchar> getInliers() { return inliers_mask; };
};
/** @brief Feature matchers base class. */
class CV_EXPORTS_W FeaturesMatcher
{
public:
CV_WRAP virtual ~FeaturesMatcher() {}
/** @overload
@param features1 First image features
@param features2 Second image features
@param matches_info Found matches
*/
CV_WRAP_AS(apply) void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
CV_OUT MatchesInfo& matches_info) { match(features1, features2, matches_info); }
/** @brief Performs images matching.
@param features Features of the source images
@param pairwise_matches Found pairwise matches
@param mask Mask indicating which image pairs must be matched
The function is parallelized with the TBB library.
@sa detail::MatchesInfo
*/
CV_WRAP_AS(apply2) void operator ()(const std::vector<ImageFeatures> &features, CV_OUT std::vector<MatchesInfo> &pairwise_matches,
const cv::UMat &mask = cv::UMat());
/** @return True, if it's possible to use the same matcher instance in parallel, false otherwise
*/
CV_WRAP bool isThreadSafe() const { return is_thread_safe_; }
/** @brief Frees unused memory allocated before if there is any.
*/
CV_WRAP virtual void collectGarbage() {}
protected:
FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
/** @brief This method must implement matching logic in order to make the wrappers
detail::FeaturesMatcher::operator()_ work.
@param features1 first image features
@param features2 second image features
@param matches_info found matches
*/
virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) = 0;
bool is_thread_safe_;
};
/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match_conf
@sa detail::FeaturesMatcher
*/
class CV_EXPORTS_W BestOf2NearestMatcher : public FeaturesMatcher
{
public:
/** @brief Constructs a "best of 2 nearest" matcher.
@param try_use_gpu Should try to use GPU or not
@param match_conf Match distances ratio threshold
@param num_matches_thresh1 Minimum number of matches required for the 2D projective transform
estimation used in the inliers classification step
@param num_matches_thresh2 Minimum number of matches required for the 2D projective transform
re-estimation on inliers
*/
CV_WRAP BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
int num_matches_thresh2 = 6);
CV_WRAP void collectGarbage() CV_OVERRIDE;
CV_WRAP static Ptr<BestOf2NearestMatcher> create(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
int num_matches_thresh2 = 6);
protected:
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE;
int num_matches_thresh1_;
int num_matches_thresh2_;
Ptr<FeaturesMatcher> impl_;
};
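/* A sketch of feature extraction and pairwise matching with the helpers above; `imgs` is assumed
   to hold the input images, and ORB is just one possible cv::Feature2D choice:

   @code
   cv::Ptr<cv::Feature2D> finder = cv::ORB::create();
   std::vector<cv::detail::ImageFeatures> features;
   cv::detail::computeImageFeatures(finder, imgs, features);

   std::vector<cv::detail::MatchesInfo> pairwise_matches;
   cv::detail::BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f /*match_conf*/);
   matcher(features, pairwise_matches);   // CV_WRAP_AS(apply2) overload
   matcher.collectGarbage();
   @endcode
*/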
class CV_EXPORTS_W BestOf2NearestRangeMatcher : public BestOf2NearestMatcher
{
public:
CV_WRAP BestOf2NearestRangeMatcher(int range_width = 5, bool try_use_gpu = false, float match_conf = 0.3f,
int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const cv::UMat &mask = cv::UMat());
protected:
int range_width_;
};
/** @brief Features matcher similar to cv::detail::BestOf2NearestMatcher which
finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match_conf.
Unlike cv::detail::BestOf2NearestMatcher this matcher uses affine
transformation (affine transformation estimate will be placed in matches_info).
@sa cv::detail::FeaturesMatcher cv::detail::BestOf2NearestMatcher
*/
class CV_EXPORTS_W AffineBestOf2NearestMatcher : public BestOf2NearestMatcher
{
public:
/** @brief Constructs a "best of 2 nearest" matcher that expects affine transformation
between images
@param full_affine whether to use the full affine transformation with 6 degrees of freedom or the reduced
transformation with 4 degrees of freedom using only rotation, translation and uniform scaling
@param try_use_gpu Should try to use GPU or not
@param match_conf Match distances ratio threshold
@param num_matches_thresh1 Minimum number of matches required for the 2D affine transform
estimation used in the inliers classification step
@sa cv::estimateAffine2D cv::estimateAffinePartial2D
*/
CV_WRAP AffineBestOf2NearestMatcher(bool full_affine = false, bool try_use_gpu = false,
float match_conf = 0.3f, int num_matches_thresh1 = 6) :
BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh1),
full_affine_(full_affine) {}
protected:
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE;
bool full_affine_;
};
//! @} stitching_match
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_MATCHERS_HPP


@@ -0,0 +1,373 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_MOTION_ESTIMATORS_HPP
#define OPENCV_STITCHING_MOTION_ESTIMATORS_HPP
#include "opencv2/core.hpp"
#include "matchers.hpp"
#include "util.hpp"
#include "camera.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_rotation
//! @{
/** @brief Rotation estimator base class.
It takes features of all images, pairwise matches between all images and estimates rotations of all
cameras.
@note The coordinate system origin is implementation-dependent, but you can always normalize the
rotations with respect to the first camera, for instance.
*/
class CV_EXPORTS_W Estimator
{
public:
virtual ~Estimator() {}
/** @brief Estimates camera parameters.
@param features Features of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
CV_WRAP_AS(apply) bool operator ()(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
CV_OUT CV_IN_OUT std::vector<CameraParams> &cameras)
{
return estimate(features, pairwise_matches, cameras);
}
protected:
/** @brief This method must implement camera parameters estimation logic in order to make the wrapper
detail::Estimator::operator()_ work.
@param features Features of images
@param pairwise_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
CV_OUT std::vector<CameraParams> &cameras) = 0;
};
/** @brief Homography based rotation estimator.
*/
class CV_EXPORTS_W HomographyBasedEstimator : public Estimator
{
public:
CV_WRAP HomographyBasedEstimator(bool is_focals_estimated = false)
: is_focals_estimated_(is_focals_estimated) {}
private:
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) CV_OVERRIDE;
bool is_focals_estimated_;
};
/** @brief Affine transformation based estimator.
This estimator uses pairwise transformations estimated by the matcher to estimate the
final transformation for each camera.
@sa cv::detail::HomographyBasedEstimator
*/
class CV_EXPORTS_W AffineBasedEstimator : public Estimator
{
public:
CV_WRAP AffineBasedEstimator(){}
private:
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) CV_OVERRIDE;
};
/** @brief Base class for all camera parameters refinement methods.
*/
class CV_EXPORTS_W BundleAdjusterBase : public Estimator
{
public:
CV_WRAP const Mat refinementMask() const { return refinement_mask_.clone(); }
CV_WRAP void setRefinementMask(const Mat &mask)
{
CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3));
refinement_mask_ = mask.clone();
}
CV_WRAP double confThresh() const { return conf_thresh_; }
CV_WRAP void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
CV_WRAP TermCriteria termCriteria() { return term_criteria_; }
CV_WRAP void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }
protected:
/** @brief Construct a bundle adjuster base instance.
@param num_params_per_cam Number of parameters per camera
@param num_errs_per_measurement Number of error terms (components) per match
*/
BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
: num_images_(0), total_num_matches_(0),
num_params_per_cam_(num_params_per_cam),
num_errs_per_measurement_(num_errs_per_measurement),
features_(0), pairwise_matches_(0), conf_thresh_(0)
{
setRefinementMask(Mat::ones(3, 3, CV_8U));
setConfThresh(1.);
setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 1000, DBL_EPSILON));
}
// Runs bundle adjustment
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) CV_OVERRIDE;
/** @brief Sets initial camera parameters to refine.
@param cameras Camera parameters
*/
virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0;
/** @brief Gets the refined camera parameters.
@param cameras Refined camera parameters
*/
virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
/** @brief Calculates error vector.
@param err Error column-vector of length total_num_matches \* num_errs_per_measurement
*/
virtual void calcError(Mat &err) = 0;
/** @brief Calculates the cost function jacobian.
@param jac Jacobian matrix of dimensions
(total_num_matches \* num_errs_per_measurement) x (num_images \* num_params_per_cam)
*/
virtual void calcJacobian(Mat &jac) = 0;
// 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine
Mat refinement_mask_;
int num_images_;
int total_num_matches_;
int num_params_per_cam_;
int num_errs_per_measurement_;
const ImageFeatures *features_;
const MatchesInfo *pairwise_matches_;
// Threshold to filter out poorly matched image pairs
double conf_thresh_;
//Levenberg-Marquardt algorithm termination criteria
TermCriteria term_criteria_;
// Camera parameters matrix (CV_64F)
Mat cam_params_;
// Connected images pairs
std::vector<std::pair<int,int> > edges_;
};
/** @brief Stub bundle adjuster that does nothing.
*/
class CV_EXPORTS_W NoBundleAdjuster : public BundleAdjusterBase
{
public:
CV_WRAP NoBundleAdjuster() : BundleAdjusterBase(0, 0) {}
private:
bool estimate(const std::vector<ImageFeatures> &, const std::vector<MatchesInfo> &,
std::vector<CameraParams> &) CV_OVERRIDE
{
return true;
}
void setUpInitialCameraParams(const std::vector<CameraParams> &) CV_OVERRIDE {}
void obtainRefinedCameraParams(std::vector<CameraParams> &) const CV_OVERRIDE {}
void calcError(Mat &) CV_OVERRIDE {}
void calcJacobian(Mat &) CV_OVERRIDE {}
};
/** @brief Implementation of the camera parameters refinement algorithm which minimizes the sum of squared
reprojection errors.
It can estimate the focal length, aspect ratio, and principal point.
You can choose which of them to refine via the refinement mask.
*/
class CV_EXPORTS_W BundleAdjusterReproj : public BundleAdjusterBase
{
public:
CV_WRAP BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_;
};
/** @brief Implementation of the camera parameters refinement algorithm which minimizes the sum of the distances
between the rays passing through the camera center and a feature.
It can estimate the focal length. It ignores the refinement mask for now.
*/
class CV_EXPORTS_W BundleAdjusterRay : public BundleAdjusterBase
{
public:
CV_WRAP BundleAdjusterRay() : BundleAdjusterBase(4, 3) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_;
};
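/* A sketch of rotation estimation followed by bundle adjustment with the classes above;
   `features` and `pairwise_matches` are assumed to come from the matching step:

   @code
   std::vector<cv::detail::CameraParams> cameras;
   cv::detail::HomographyBasedEstimator estimator;
   bool ok = estimator(features, pairwise_matches, cameras);
   if (ok)
   {
       for (size_t i = 0; i < cameras.size(); ++i)
           cameras[i].R.convertTo(cameras[i].R, CV_32F); // adjusters work on CV_32F rotations
       cv::Ptr<cv::detail::BundleAdjusterBase> adjuster = cv::makePtr<cv::detail::BundleAdjusterRay>();
       adjuster->setConfThresh(1.0);
       ok = (*adjuster)(features, pairwise_matches, cameras);
   }
   @endcode
*/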
/** @brief Bundle adjuster that expects an affine transformation
represented in homogeneous coordinates in R for each camera param. Implements a
camera parameters refinement algorithm which minimizes the sum of squared reprojection
errors.
It estimates all transformation parameters. The refinement mask is ignored.
@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffinePartial
*/
class CV_EXPORTS_W BundleAdjusterAffine : public BundleAdjusterBase
{
public:
CV_WRAP BundleAdjusterAffine() : BundleAdjusterBase(6, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_;
};
/** @brief Bundle adjuster that expects an affine transformation with 4 DOF
represented in homogeneous coordinates in R for each camera parameter. Implements a
camera parameters refinement algorithm which minimizes the sum of squared reprojection
errors.

It estimates all transformation parameters. The refinement mask is ignored.

@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffine
*/
class CV_EXPORTS_W BundleAdjusterAffinePartial : public BundleAdjusterBase
{
public:
CV_WRAP BundleAdjusterAffinePartial() : BundleAdjusterBase(4, 2) {}
private:
void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) CV_OVERRIDE;
void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const CV_OVERRIDE;
void calcError(Mat &err) CV_OVERRIDE;
void calcJacobian(Mat &jac) CV_OVERRIDE;
Mat err1_, err2_;
};
enum WaveCorrectKind
{
WAVE_CORRECT_HORIZ,
WAVE_CORRECT_VERT,
WAVE_CORRECT_AUTO
};
/** @brief Tries to detect the wave correction kind depending
on whether a panorama spans horizontally or vertically
@param rmats Camera rotation matrices.
@return The correction kind to use for this panorama
*/
CV_EXPORTS
WaveCorrectKind autoDetectWaveCorrectKind(const std::vector<Mat> &rmats);
/** @brief Tries to make panorama more horizontal (or vertical).
@param rmats Camera rotation matrices.
@param kind Correction kind, see detail::WaveCorrectKind.
*/
void CV_EXPORTS_W waveCorrect(CV_IN_OUT std::vector<Mat> &rmats, WaveCorrectKind kind);
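// Typical use (illustrative; `cameras` is assumed to come from a bundle
// adjuster as above): collect the rotation matrices of the estimated cameras,
// straighten them, and write the result back.
//
//     std::vector<Mat> rmats;
//     for (size_t i = 0; i < cameras.size(); ++i)
//         rmats.push_back(cameras[i].R.clone());
//     detail::waveCorrect(rmats, detail::autoDetectWaveCorrectKind(rmats));
//     for (size_t i = 0; i < cameras.size(); ++i)
//         cameras[i].R = rmats[i];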
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
// Returns matches graph representation in DOT language
String CV_EXPORTS_W matchesGraphAsString(std::vector<String> &pathes, std::vector<MatchesInfo> &pairwise_matches,
float conf_threshold);
CV_EXPORTS_W std::vector<int> leaveBiggestComponent(
std::vector<ImageFeatures> &features,
std::vector<MatchesInfo> &pairwise_matches,
float conf_threshold);
void CV_EXPORTS findMaxSpanningTree(
int num_images, const std::vector<MatchesInfo> &pairwise_matches,
Graph &span_tree, std::vector<int> &centers);
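// Illustrative use of the auxiliary functions above: drop images outside the
// largest matched component before estimation; the returned indices identify
// which of the original images were kept (`features` and `pairwise_matches`
// are assumed to come from the matching stage).
//
//     std::vector<int> kept_indices =
//         detail::leaveBiggestComponent(features, pairwise_matches, 1.0f);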
//! @} stitching_rotation
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_MOTION_ESTIMATORS_HPP

View File

@@ -0,0 +1,291 @@
#ifndef OPENCV_STITCHING_SEAM_FINDERS_HPP
#define OPENCV_STITCHING_SEAM_FINDERS_HPP
#include <set>
#include "opencv2/core.hpp"
#include "opencv2/opencv_modules.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_seam
//! @{
/** @brief Base class for a seam estimator.
*/
class CV_EXPORTS_W SeamFinder
{
public:
CV_WRAP virtual ~SeamFinder() {}
enum { NO, VORONOI_SEAM, DP_SEAM };
/** @brief Estimates seams.
@param src Source images
@param corners Source image top-left corners
@param masks Source image masks to update
*/
CV_WRAP virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &masks) = 0;
CV_WRAP static Ptr<SeamFinder> createDefault(int type);
};
/** @brief Stub seam estimator which does nothing.
*/
class CV_EXPORTS_W NoSeamFinder : public SeamFinder
{
public:
CV_WRAP void find(const std::vector<UMat>&, const std::vector<Point>&, CV_IN_OUT std::vector<UMat>&) CV_OVERRIDE {}
};
/** @brief Base class for all pairwise seam estimators.
*/
class CV_EXPORTS_W PairwiseSeamFinder : public SeamFinder
{
public:
CV_WRAP virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &masks) CV_OVERRIDE;
protected:
void run();
/** @brief Resolves the mask intersection of the two specified images in the given ROI.
@param first First image index
@param second Second image index
@param roi Region of interest
*/
virtual void findInPair(size_t first, size_t second, Rect roi) = 0;
std::vector<UMat> images_;
std::vector<Size> sizes_;
std::vector<Point> corners_;
std::vector<UMat> masks_;
};
/** @brief Voronoi diagram-based seam estimator.
*/
class CV_EXPORTS_W VoronoiSeamFinder : public PairwiseSeamFinder
{
public:
CV_WRAP virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
CV_IN_OUT std::vector<UMat> &masks) CV_OVERRIDE;
virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners,
std::vector<UMat> &masks);
private:
void findInPair(size_t first, size_t second, Rect roi) CV_OVERRIDE;
};
class CV_EXPORTS_W DpSeamFinder : public SeamFinder
{
public:
enum CostFunction { COLOR, COLOR_GRAD };
DpSeamFinder(CostFunction costFunc = COLOR);
CV_WRAP DpSeamFinder(String costFunc);
CostFunction costFunction() const { return costFunc_; }
void setCostFunction(CostFunction val) { costFunc_ = val; }
CV_WRAP void setCostFunction(String val);
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
private:
enum ComponentState
{
FIRST = 1, SECOND = 2, INTERS = 4,
INTERS_FIRST = INTERS | FIRST,
INTERS_SECOND = INTERS | SECOND
};
class ImagePairLess
{
public:
ImagePairLess(const std::vector<Mat> &images, const std::vector<Point> &corners)
: src_(&images[0]), corners_(&corners[0]) {}
bool operator() (const std::pair<size_t, size_t> &l, const std::pair<size_t, size_t> &r) const
{
Point c1 = corners_[l.first] + Point(src_[l.first].cols / 2, src_[l.first].rows / 2);
Point c2 = corners_[l.second] + Point(src_[l.second].cols / 2, src_[l.second].rows / 2);
int d1 = (c1 - c2).dot(c1 - c2);
c1 = corners_[r.first] + Point(src_[r.first].cols / 2, src_[r.first].rows / 2);
c2 = corners_[r.second] + Point(src_[r.second].cols / 2, src_[r.second].rows / 2);
int d2 = (c1 - c2).dot(c1 - c2);
return d1 < d2;
}
private:
const Mat *src_;
const Point *corners_;
};
class ClosePoints
{
public:
ClosePoints(int minDist) : minDist_(minDist) {}
bool operator() (const Point &p1, const Point &p2) const
{
int dist2 = (p1.x-p2.x) * (p1.x-p2.x) + (p1.y-p2.y) * (p1.y-p2.y);
return dist2 < minDist_ * minDist_;
}
private:
int minDist_;
};
void process(
const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
void findComponents();
void findEdges();
void resolveConflicts(
const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
void computeGradients(const Mat &image1, const Mat &image2);
bool hasOnlyOneNeighbor(int comp);
bool closeToContour(int y, int x, const Mat_<uchar> &contourMask);
bool getSeamTips(int comp1, int comp2, Point &p1, Point &p2);
void computeCosts(
const Mat &image1, const Mat &image2, Point tl1, Point tl2,
int comp, Mat_<float> &costV, Mat_<float> &costH);
bool estimateSeam(
const Mat &image1, const Mat &image2, Point tl1, Point tl2, int comp,
Point p1, Point p2, std::vector<Point> &seam, bool &isHorizontal);
void updateLabelsUsingSeam(
int comp1, int comp2, const std::vector<Point> &seam, bool isHorizontalSeam);
CostFunction costFunc_;
// data of the image pair being processed
Point unionTl_, unionBr_;
Size unionSize_;
Mat_<uchar> mask1_, mask2_;
Mat_<uchar> contour1mask_, contour2mask_;
Mat_<float> gradx1_, grady1_;
Mat_<float> gradx2_, grady2_;
// components data
int ncomps_;
Mat_<int> labels_;
std::vector<ComponentState> states_;
std::vector<Point> tls_, brs_;
std::vector<std::vector<Point> > contours_;
std::set<std::pair<int, int> > edges_;
};
/** @brief Base class for all minimum graph-cut-based seam estimators.
*/
class CV_EXPORTS GraphCutSeamFinderBase
{
public:
enum CostType { COST_COLOR, COST_COLOR_GRAD };
};
/** @brief Minimum graph cut-based seam estimator. See details in @cite V03 .
*/
class CV_EXPORTS_W GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
{
public:
GraphCutSeamFinder(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,
float bad_region_penalty = 1000.f);
CV_WRAP GraphCutSeamFinder(String cost_type, float terminal_cost = 10000.f,
float bad_region_penalty = 1000.f);
~GraphCutSeamFinder();
CV_WRAP void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) CV_OVERRIDE;
private:
// To avoid GCGraph dependency
class Impl;
Ptr<PairwiseSeamFinder> impl_;
};
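// Usage sketch (illustrative): warped images are expected in CV_32F for the
// graph-cut cost computation; `images_warped_f`, `corners` and `masks_warped`
// are assumed to come from the warping stage.
//
//     Ptr<detail::SeamFinder> seam_finder =
//         makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR);
//     seam_finder->find(images_warped_f, corners, masks_warped);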
#ifdef HAVE_OPENCV_CUDALEGACY
class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder
{
public:
GraphCutSeamFinderGpu(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,
float bad_region_penalty = 1000.f)
: cost_type_(cost_type), terminal_cost_(terminal_cost),
bad_region_penalty_(bad_region_penalty) {}
void find(const std::vector<cv::UMat> &src, const std::vector<cv::Point> &corners,
std::vector<cv::UMat> &masks) CV_OVERRIDE;
void findInPair(size_t first, size_t second, Rect roi) CV_OVERRIDE;
private:
void setGraphWeightsColor(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask1, const cv::Mat &mask2,
cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);
void setGraphWeightsColorGrad(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &dx1, const cv::Mat &dx2,
const cv::Mat &dy1, const cv::Mat &dy2, const cv::Mat &mask1, const cv::Mat &mask2,
cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);
std::vector<Mat> dx_, dy_;
int cost_type_;
float terminal_cost_;
float bad_region_penalty_;
};
#endif
//! @}
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_SEAM_FINDERS_HPP

View File

@@ -0,0 +1,91 @@
#ifndef OPENCV_STITCHING_TIMELAPSERS_HPP
#define OPENCV_STITCHING_TIMELAPSERS_HPP
#include "opencv2/core.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching
//! @{
// Base Timelapser class: takes a sequence of images, applies the appropriate shift, and stores the result in dst_.
class CV_EXPORTS_W Timelapser
{
public:
enum {AS_IS, CROP};
virtual ~Timelapser() {}
CV_WRAP static Ptr<Timelapser> createDefault(int type);
CV_WRAP virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_WRAP virtual void process(InputArray img, InputArray mask, Point tl);
CV_WRAP virtual const UMat& getDst() {return dst_;}
protected:
virtual bool test_point(Point pt);
UMat dst_;
Rect dst_roi_;
};
class CV_EXPORTS_W TimelapserCrop : public Timelapser
{
public:
virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes) CV_OVERRIDE;
};
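// Usage sketch (illustrative; following the stitching sample, `img_warped_s`
// is assumed to be the warped image converted to CV_16S, with `corners` and
// `sizes` coming from the warping stage):
//
//     Ptr<detail::Timelapser> timelapser =
//         detail::Timelapser::createDefault(detail::Timelapser::CROP);
//     timelapser->initialize(corners, sizes);
//     timelapser->process(img_warped_s, Mat::ones(img_warped_s.size(), CV_8UC1), corners[i]);
//     // timelapser->getDst() now holds the shifted frame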
//! @}
} // namespace detail
} // namespace cv
#endif // OPENCV_STITCHING_TIMELAPSERS_HPP

View File

@@ -0,0 +1,121 @@
#ifndef OPENCV_STITCHING_UTIL_HPP
#define OPENCV_STITCHING_UTIL_HPP
#include <list>
#include "opencv2/core.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching
//! @{
class CV_EXPORTS DisjointSets
{
public:
DisjointSets(int elem_count = 0) { createOneElemSets(elem_count); }
void createOneElemSets(int elem_count);
int findSetByElem(int elem);
int mergeSets(int set1, int set2);
std::vector<int> parent;
std::vector<int> size;
private:
std::vector<int> rank_;
};
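// Small example (illustrative): classic union-find over four elements.
//
//     detail::DisjointSets ds(4);
//     int a = ds.findSetByElem(0);
//     int b = ds.findSetByElem(3);
//     if (a != b)
//         ds.mergeSets(a, b);  // returns the id of the merged set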
struct CV_EXPORTS GraphEdge
{
GraphEdge(int from, int to, float weight);
bool operator <(const GraphEdge& other) const { return weight < other.weight; }
bool operator >(const GraphEdge& other) const { return weight > other.weight; }
int from, to;
float weight;
};
inline GraphEdge::GraphEdge(int _from, int _to, float _weight) : from(_from), to(_to), weight(_weight) {}
class CV_EXPORTS Graph
{
public:
Graph(int num_vertices = 0) { create(num_vertices); }
void create(int num_vertices) { edges_.assign(num_vertices, std::list<GraphEdge>()); }
int numVertices() const { return static_cast<int>(edges_.size()); }
void addEdge(int from, int to, float weight);
template <typename B> B forEach(B body) const;
template <typename B> B walkBreadthFirst(int from, B body) const;
private:
std::vector< std::list<GraphEdge> > edges_;
};
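// Small example (illustrative): accumulate edge weights with forEach. Note
// that addEdge inserts a single directed edge.
//
//     detail::Graph g(3);
//     g.addEdge(0, 1, 1.f);
//     g.addEdge(1, 2, 0.5f);
//     float total = 0.f;
//     g.forEach([&total](const detail::GraphEdge &e) { total += e.weight; });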
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
CV_EXPORTS_W bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);
CV_EXPORTS_W Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images);
CV_EXPORTS_W Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS_W Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes);
CV_EXPORTS_W Point resultTl(const std::vector<Point> &corners);
// Returns a random 'count'-element subset of the set {0, 1, ..., size-1}
CV_EXPORTS_W void selectRandomSubset(int count, int size, std::vector<int> &subset);
CV_EXPORTS_W int& stitchingLogLevel();
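// Illustrative: the final panorama canvas is the union of all warped image
// rectangles (`corners` and `sizes` as in the sketches above).
//
//     Rect dst_roi = detail::resultRoi(corners, sizes);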
//! @}
} // namespace detail
} // namespace cv
#include "util_inl.hpp"
#endif // OPENCV_STITCHING_UTIL_HPP

View File

@@ -0,0 +1,131 @@
#ifndef OPENCV_STITCHING_UTIL_INL_HPP
#define OPENCV_STITCHING_UTIL_INL_HPP
#include <queue>
#include "opencv2/core.hpp"
#include "util.hpp" // Make your IDE see declarations
//! @cond IGNORED
namespace cv {
namespace detail {
template <typename B>
B Graph::forEach(B body) const
{
for (int i = 0; i < numVertices(); ++i)
{
std::list<GraphEdge>::const_iterator edge = edges_[i].begin();
for (; edge != edges_[i].end(); ++edge)
body(*edge);
}
return body;
}
template <typename B>
B Graph::walkBreadthFirst(int from, B body) const
{
std::vector<bool> was(numVertices(), false);
std::queue<int> vertices;
was[from] = true;
vertices.push(from);
while (!vertices.empty())
{
int vertex = vertices.front();
vertices.pop();
std::list<GraphEdge>::const_iterator edge = edges_[vertex].begin();
for (; edge != edges_[vertex].end(); ++edge)
{
if (!was[edge->to])
{
body(*edge);
was[edge->to] = true;
vertices.push(edge->to);
}
}
}
return body;
}
//////////////////////////////////////////////////////////////////////////////
// Some auxiliary math functions
static inline
float normL2(const Point3f& a)
{
return a.x * a.x + a.y * a.y + a.z * a.z;
}
static inline
float normL2(const Point3f& a, const Point3f& b)
{
return normL2(a - b);
}
static inline
double normL2sq(const Mat &r)
{
return r.dot(r);
}
static inline int sqr(int x) { return x * x; }
static inline float sqr(float x) { return x * x; }
static inline double sqr(double x) { return x * x; }
} // namespace detail
} // namespace cv
//! @endcond
#endif // OPENCV_STITCHING_UTIL_INL_HPP

View File

@@ -0,0 +1,682 @@
#ifndef OPENCV_STITCHING_WARPERS_HPP
#define OPENCV_STITCHING_WARPERS_HPP
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/opencv_modules.hpp"
namespace cv {
namespace detail {
//! @addtogroup stitching_warp
//! @{
/** @brief Rotation-only model image warper interface.
*/
class CV_EXPORTS RotationWarper
{
public:
virtual ~RotationWarper() {}
/** @brief Projects the image point.
@param pt Source point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected point
*/
virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0;
/** @brief Projects the image point backward.
@param pt Projected point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Backward-projected point
*/
#if CV_VERSION_MAJOR == 4
virtual Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
{
CV_UNUSED(pt); CV_UNUSED(K); CV_UNUSED(R);
CV_Error(Error::StsNotImplemented, "");
}
#else
virtual Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) = 0;
#endif
/** @brief Builds the projection maps according to the given camera data.
@param src_size Source image size
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
@param ymap Projection map for the y axis
@return Projected image minimum bounding box
*/
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) = 0;
/** @brief Projects the image.
@param src Source image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Project image top-left corner
*/
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
CV_OUT OutputArray dst) = 0;
/** @brief Projects the image backward.
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst_size Backward-projected image size
@param dst Backward-projected image
*/
virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, CV_OUT OutputArray dst) = 0;
/**
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
*/
virtual Rect warpRoi(Size src_size, InputArray K, InputArray R) = 0;
virtual float getScale() const { return 1.f; }
virtual void setScale(float) {}
};
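// Usage sketch (illustrative; `warped_image_scale`, `cameras`, `img` and
// `img_warped` are assumed inputs). The intrinsics are converted to CV_32F
// since the projectors store float parameters:
//
//     detail::SphericalWarper warper(static_cast<float>(warped_image_scale));
//     Mat K;
//     cameras[i].K().convertTo(K, CV_32F);
//     Point tl = warper.warp(img, K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, img_warped);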
/** @brief Base class for warping logic implementation.
*/
struct CV_EXPORTS_W_SIMPLE ProjectorBase
{
void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F),
InputArray R = Mat::eye(3, 3, CV_32F),
InputArray T = Mat::zeros(3, 1, CV_32F));
float scale;
float k[9];
float rinv[9];
float r_kinv[9];
float k_rinv[9];
float t[3];
};
/** @brief Base class for rotation-based warpers using a detail::ProjectorBase derived class.
*/
template <class P>
class CV_EXPORTS_TEMPLATE RotationWarperBase : public RotationWarper
{
public:
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) CV_OVERRIDE;
void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst) CV_OVERRIDE;
Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
float getScale() const CV_OVERRIDE { return projector_.scale; }
void setScale(float val) CV_OVERRIDE { projector_.scale = val; }
protected:
// Detects ROI of the destination image. It's correct for any projection.
virtual void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);
// Detects ROI of the destination image by walking over the image border.
// Correctness for any projection isn't guaranteed.
void detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br);
P projector_;
};
struct CV_EXPORTS PlaneProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
/** @brief Warper that maps an image onto the z = 1 plane.
*/
class CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector>
{
public:
/** @brief Construct an instance of the plane warper class.
@param scale Projected image scale multiplier
*/
PlaneWarper(float scale = 1.f) { projector_.scale = scale; }
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE;
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);
Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) CV_OVERRIDE;
Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T);
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap);
Rect buildMaps(Size src_size, InputArray K, InputArray R, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R,
int interp_mode, int border_mode, CV_OUT OutputArray dst) CV_OVERRIDE;
virtual Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
CV_OUT OutputArray dst);
Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE;
Rect warpRoi(Size src_size, InputArray K, InputArray R, InputArray T);
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE;
};
/** @brief Affine warper that uses rotations and translations.

Uses an affine transformation in homogeneous coordinates to represent both rotation and
translation in the camera rotation matrix.
*/
class CV_EXPORTS AffineWarper : public PlaneWarper
{
public:
/** @brief Construct an instance of the affine warper class.
@param scale Projected image scale multiplier
*/
AffineWarper(float scale = 1.f) : PlaneWarper(scale) {}
/** @brief Projects the image point.
@param pt Source point
@param K Camera intrinsic parameters
@param H Camera extrinsic parameters
@return Projected point
*/
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE;
/** @brief Projects the image point backward.
@param pt Projected point
@param K Camera intrinsic parameters
@param H Camera extrinsic parameters
@return Backward-projected point
*/
Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE;
/** @brief Builds the projection maps according to the given camera data.
@param src_size Source image size
@param K Camera intrinsic parameters
@param H Camera extrinsic parameters
@param xmap Projection map for the x axis
@param ymap Projection map for the y axis
@return Projected image minimum bounding box
*/
Rect buildMaps(Size src_size, InputArray K, InputArray H, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
/** @brief Projects the image.
@param src Source image
@param K Camera intrinsic parameters
@param H Camera extrinsic parameters
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Project image top-left corner
*/
Point warp(InputArray src, InputArray K, InputArray H,
int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
/**
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param H Camera extrinsic parameters
@return Projected image minimum bounding box
*/
Rect warpRoi(Size src_size, InputArray K, InputArray H) CV_OVERRIDE;
protected:
/** @brief Extracts the rotation and translation matrices from the matrix H representing an
affine transformation in homogeneous coordinates
*/
void getRTfromHomogeneous(InputArray H, Mat &R, Mat &T);
};
struct CV_EXPORTS_W_SIMPLE SphericalProjector : ProjectorBase
{
CV_WRAP void mapForward(float x, float y, float &u, float &v);
CV_WRAP void mapBackward(float u, float v, float &x, float &y);
};
/** @brief Warper that maps an image onto the unit sphere located at the origin.

Projects the image onto a sphere with origin at (0, 0, 0) and radius `scale`, measured in pixels.
A 360 panorama would therefore have a resulting width of 2 * scale * PI pixels.
Poles are located at the (0, -1, 0) and (0, 1, 0) points.
*/
class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>
{
public:
/** @brief Construct an instance of the spherical warper class.
@param scale Radius of the projected sphere, in pixels. An image spanning the
whole sphere will have a width of 2 * scale * PI pixels.
*/
SphericalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE;
};
struct CV_EXPORTS CylindricalProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
/** @brief Warper that maps an image onto the x\*x + z\*z = 1 cylinder.
*/
class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector>
{
public:
/** @brief Construct an instance of the cylindrical warper class.
@param scale Projected image scale multiplier
*/
CylindricalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE;
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE;
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE
{
RotationWarperBase<CylindricalProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
}
};
struct CV_EXPORTS FisheyeProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS FisheyeWarper : public RotationWarperBase<FisheyeProjector>
{
public:
FisheyeWarper(float scale) { projector_.scale = scale; }
};
struct CV_EXPORTS StereographicProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS StereographicWarper : public RotationWarperBase<StereographicProjector>
{
public:
StereographicWarper(float scale) { projector_.scale = scale; }
};
struct CV_EXPORTS CompressedRectilinearProjector : ProjectorBase
{
float a, b;
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS CompressedRectilinearWarper : public RotationWarperBase<CompressedRectilinearProjector>
{
public:
CompressedRectilinearWarper(float scale, float A = 1, float B = 1)
{
projector_.a = A;
projector_.b = B;
projector_.scale = scale;
}
};
struct CV_EXPORTS CompressedRectilinearPortraitProjector : ProjectorBase
{
float a, b;
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS CompressedRectilinearPortraitWarper : public RotationWarperBase<CompressedRectilinearPortraitProjector>
{
public:
CompressedRectilinearPortraitWarper(float scale, float A = 1, float B = 1)
{
projector_.a = A;
projector_.b = B;
projector_.scale = scale;
}
};
struct CV_EXPORTS PaniniProjector : ProjectorBase
{
float a, b;
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS PaniniWarper : public RotationWarperBase<PaniniProjector>
{
public:
PaniniWarper(float scale, float A = 1, float B = 1)
{
projector_.a = A;
projector_.b = B;
projector_.scale = scale;
}
};
struct CV_EXPORTS PaniniPortraitProjector : ProjectorBase
{
float a, b;
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS PaniniPortraitWarper : public RotationWarperBase<PaniniPortraitProjector>
{
public:
PaniniPortraitWarper(float scale, float A = 1, float B = 1)
{
projector_.a = A;
projector_.b = B;
projector_.scale = scale;
}
};
struct CV_EXPORTS MercatorProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS MercatorWarper : public RotationWarperBase<MercatorProjector>
{
public:
MercatorWarper(float scale) { projector_.scale = scale; }
};
struct CV_EXPORTS TransverseMercatorProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS TransverseMercatorWarper : public RotationWarperBase<TransverseMercatorProjector>
{
public:
TransverseMercatorWarper(float scale) { projector_.scale = scale; }
};
class CV_EXPORTS PlaneWarperGpu : public PlaneWarper
{
public:
PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{
Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
d_xmap_.download(xmap);
d_ymap_.download(ymap);
return result;
}
Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{
Rect result = buildMaps(src_size, K, R, T, d_xmap_, d_ymap_);
d_xmap_.download(xmap);
d_ymap_.download(ymap);
return result;
}
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) CV_OVERRIDE
{
d_src_.upload(src);
Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
d_dst_.download(dst);
return result;
}
Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
OutputArray dst) CV_OVERRIDE
{
d_src_.upload(src);
Point result = warp(d_src_, K, R, T, interp_mode, border_mode, d_dst_);
d_dst_.download(dst);
return result;
}
Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,
cuda::GpuMat & dst);
Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
cuda::GpuMat & dst);
private:
cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
};
class CV_EXPORTS SphericalWarperGpu : public SphericalWarper
{
public:
SphericalWarperGpu(float scale) : SphericalWarper(scale) {}
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{
Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
d_xmap_.download(xmap);
d_ymap_.download(ymap);
return result;
}
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) CV_OVERRIDE
{
d_src_.upload(src);
Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
d_dst_.download(dst);
return result;
}
Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,
cuda::GpuMat & dst);
private:
cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
};
class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper
{
public:
CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE
{
Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
d_xmap_.download(xmap);
d_ymap_.download(ymap);
return result;
}
Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) CV_OVERRIDE
{
d_src_.upload(src);
Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
d_dst_.download(dst);
return result;
}
Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap);
Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode,
cuda::GpuMat & dst);
private:
cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
};
struct CV_EXPORTS SphericalPortraitProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
// Projects image onto unit sphere with origin at (0, 0, 0).
// Poles are located NOT at (0, -1, 0) and (0, 1, 0) points, BUT at (1, 0, 0) and (-1, 0, 0) points.
class CV_EXPORTS SphericalPortraitWarper : public RotationWarperBase<SphericalPortraitProjector>
{
public:
SphericalPortraitWarper(float scale) { projector_.scale = scale; }
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE;
};
struct CV_EXPORTS CylindricalPortraitProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS CylindricalPortraitWarper : public RotationWarperBase<CylindricalPortraitProjector>
{
public:
CylindricalPortraitWarper(float scale) { projector_.scale = scale; }
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE
{
RotationWarperBase<CylindricalPortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
}
};
struct CV_EXPORTS PlanePortraitProjector : ProjectorBase
{
void mapForward(float x, float y, float &u, float &v);
void mapBackward(float u, float v, float &x, float &y);
};
class CV_EXPORTS PlanePortraitWarper : public RotationWarperBase<PlanePortraitProjector>
{
public:
PlanePortraitWarper(float scale) { projector_.scale = scale; }
protected:
void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE
{
RotationWarperBase<PlanePortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
}
};
//! @} stitching_warp
} // namespace detail
} // namespace cv
#include "warpers_inl.hpp"
#endif // OPENCV_STITCHING_WARPERS_HPP

View File

@@ -0,0 +1,782 @@
#ifndef OPENCV_STITCHING_WARPERS_INL_HPP
#define OPENCV_STITCHING_WARPERS_INL_HPP
#include "opencv2/core.hpp"
#include "warpers.hpp" // Make your IDE see declarations
#include <limits>
//! @cond IGNORED
namespace cv {
namespace detail {
template <class P>
Point2f RotationWarperBase<P>::warpPoint(const Point2f &pt, InputArray K, InputArray R)
{
projector_.setCameraParams(K, R);
Point2f uv;
projector_.mapForward(pt.x, pt.y, uv.x, uv.y);
return uv;
}
template <class P>
Point2f RotationWarperBase<P>::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
{
projector_.setCameraParams(K, R);
Point2f xy;
projector_.mapBackward(pt.x, pt.y, xy.x, xy.y);
return xy;
}
template <class P>
Rect RotationWarperBase<P>::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray _xmap, OutputArray _ymap)
{
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
_xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
_ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();
float x, y;
for (int v = dst_tl.y; v <= dst_br.y; ++v)
{
for (int u = dst_tl.x; u <= dst_br.x; ++u)
{
projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
}
}
return Rect(dst_tl, dst_br);
}
template <class P>
Point RotationWarperBase<P>::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst)
{
UMat xmap, ymap;
Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
remap(src, dst, xmap, ymap, interp_mode, border_mode);
return dst_roi.tl();
}
template <class P>
void RotationWarperBase<P>::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst)
{
projector_.setCameraParams(K, R);
Point src_tl, src_br;
detectResultRoi(dst_size, src_tl, src_br);
Size size = src.size();
CV_Assert(src_br.x - src_tl.x + 1 == size.width && src_br.y - src_tl.y + 1 == size.height);
Mat xmap(dst_size, CV_32F);
Mat ymap(dst_size, CV_32F);
float u, v;
for (int y = 0; y < dst_size.height; ++y)
{
for (int x = 0; x < dst_size.width; ++x)
{
projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
xmap.at<float>(y, x) = u - src_tl.x;
ymap.at<float>(y, x) = v - src_tl.y;
}
}
dst.create(dst_size, src.type());
remap(src, dst, xmap, ymap, interp_mode, border_mode);
}
template <class P>
Rect RotationWarperBase<P>::warpRoi(Size src_size, InputArray K, InputArray R)
{
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));
}
template <class P>
void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
float tl_uf = (std::numeric_limits<float>::max)();
float tl_vf = (std::numeric_limits<float>::max)();
float br_uf = -(std::numeric_limits<float>::max)();
float br_vf = -(std::numeric_limits<float>::max)();
float u, v;
for (int y = 0; y < src_size.height; ++y)
{
for (int x = 0; x < src_size.width; ++x)
{
projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
}
}
dst_tl.x = static_cast<int>(tl_uf);
dst_tl.y = static_cast<int>(tl_vf);
dst_br.x = static_cast<int>(br_uf);
dst_br.y = static_cast<int>(br_vf);
}
template <class P>
void RotationWarperBase<P>::detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br)
{
float tl_uf = (std::numeric_limits<float>::max)();
float tl_vf = (std::numeric_limits<float>::max)();
float br_uf = -(std::numeric_limits<float>::max)();
float br_vf = -(std::numeric_limits<float>::max)();
float u, v;
for (int x = 0; x < src_size.width; ++x)
{
projector_.mapForward(static_cast<float>(x), 0, u, v);
tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size.height - 1), u, v);
tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
}
for (int y = 0; y < src_size.height; ++y)
{
projector_.mapForward(0, static_cast<float>(y), u, v);
tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(y), u, v);
tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
}
dst_tl.x = static_cast<int>(tl_uf);
dst_tl.y = static_cast<int>(tl_vf);
dst_br.x = static_cast<int>(br_uf);
dst_br.y = static_cast<int>(br_vf);
}
inline
void PlaneProjector::mapForward(float x, float y, float &u, float &v)
{
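    // r_kinv is R * K^{-1}: back-project the pixel into a ray, then intersect
    // the ray with the z = 1 plane, taking the camera translation t into account.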
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
x_ = t[0] + x_ / z_ * (1 - t[2]);
y_ = t[1] + y_ / z_ * (1 - t[2]);
u = scale * x_;
v = scale * y_;
}
inline
void PlaneProjector::mapBackward(float u, float v, float &x, float &y)
{
u = u / scale - t[0];
v = v / scale - t[1];
float z;
x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2] * (1 - t[2]);
y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5] * (1 - t[2]);
z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8] * (1 - t[2]);
x /= z;
y /= z;
}
inline
void SphericalProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
u = scale * atan2f(x_, z_);
float w = y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_);
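    // w == w filters out NaN, which occurs when (x_, y_, z_) degenerates to the origin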
v = scale * (static_cast<float>(CV_PI) - acosf(w == w ? w : 0));
}
inline
void SphericalProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float sinv = sinf(static_cast<float>(CV_PI) - v);
float x_ = sinv * sinf(u);
float y_ = cosf(static_cast<float>(CV_PI) - v);
float z_ = sinv * cosf(u);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
u = scale * atan2f(x_, z_);
v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
}
inline
void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float x_ = sinf(u);
float y_ = v;
float z_ = cosf(u);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void FisheyeProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
u = scale * v_ * cosf(u_);
v = scale * v_ * sinf(u_);
}
inline
void FisheyeProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float u_ = atan2f(v, u);
float v_ = sqrtf(u*u + v*v);
float sinv = sinf((float)CV_PI - v_);
float x_ = sinv * sinf(u_);
float y_ = cosf((float)CV_PI - v_);
float z_ = sinv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void StereographicProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
float r = sinf(v_) / (1 - cosf(v_));
u = scale * r * std::cos(u_);
v = scale * r * std::sin(u_);
}
inline
void StereographicProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float u_ = atan2f(v, u);
float r = sqrtf(u*u + v*v);
float v_ = 2 * atanf(1.f / r);
float sinv = sinf((float)CV_PI - v_);
float x_ = sinv * sinf(u_);
float y_ = cosf((float)CV_PI - v_);
float z_ = sinv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void CompressedRectilinearProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
u = scale * a * tanf(u_ / a);
v = scale * b * tanf(v_) / cosf(u_);
}
inline
void CompressedRectilinearProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float aatg = a * atanf(u / a);
float u_ = aatg;
float v_ = atanf(v * cosf(aatg) / b);
float cosv = cosf(v_);
float x_ = cosv * sinf(u_);
float y_ = sinf(v_);
float z_ = cosv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void CompressedRectilinearPortraitProjector::mapForward(float x, float y, float &u, float &v)
{
float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
u = - scale * a * tanf(u_ / a);
v = scale * b * tanf(v_) / cosf(u_);
}
inline
void CompressedRectilinearPortraitProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= - scale;
v /= scale;
float aatg = a * atanf(u / a);
float u_ = aatg;
float v_ = atanf(v * cosf( aatg ) / b);
float cosv = cosf(v_);
float y_ = cosv * sinf(u_);
float x_ = sinf(v_);
float z_ = cosv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void PaniniProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
float tg = a * tanf(u_ / a);
u = scale * tg;
float sinu = sinf(u_);
if ( fabs(sinu) < 1E-7 )
v = scale * b * tanf(v_);
else
v = scale * b * tg * tanf(v_) / sinu;
}
inline
void PaniniProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float lambda = a * atanf(u / a);
float u_ = lambda;
float v_;
if (fabs(lambda) > 1E-7)
v_ = atanf(v * sinf(lambda) / (b * a * tanf(lambda / a)));
else
v_ = atanf(v / b);
float cosv = cosf(v_);
float x_ = cosv * sinf(u_);
float y_ = sinf(v_);
float z_ = cosv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void PaniniPortraitProjector::mapForward(float x, float y, float &u, float &v)
{
float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
float tg = a * tanf(u_ / a);
u = - scale * tg;
float sinu = sinf( u_ );
if ( fabs(sinu) < 1E-7 )
v = scale * b * tanf(v_);
else
v = scale * b * tg * tanf(v_) / sinu;
}
inline
void PaniniPortraitProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= - scale;
v /= scale;
float lambda = a * atanf(u / a);
float u_ = lambda;
float v_;
if (fabs(lambda) > 1E-7)
v_ = atanf(v * sinf(lambda) / (b * a * tanf(lambda / a)));
else
v_ = atanf(v / b);
float cosv = cosf(v_);
float y_ = cosv * sinf(u_);
float x_ = sinf(v_);
float z_ = cosv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
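// Mercator projection: v = log(tan(pi/4 + v_/2)) is the standard Mercator
// ordinate; the inverse below recovers the latitude with the Gudermannian
// function, v_ = atan(sinh(v)).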
inline
void MercatorProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
u = scale * u_;
v = scale * logf( tanf( (float)(CV_PI/4) + v_/2 ) );
}
inline
void MercatorProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float v_ = atanf( sinhf(v) );
float u_ = u;
float cosv = cosf(v_);
float x_ = cosv * sinf(u_);
float y_ = sinf(v_);
float z_ = cosv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
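// Transverse Mercator: with B = cos(v_) * sin(u_), the forward map is
// u = (1/2) * log((1 + B) / (1 - B)) = atanh(B), v = atan2(tan(v_), cos(u_));
// the inverse recovers v_ = asin(sin(v) / cosh(u)) and
// u_ = atan2(sinh(u), cos(v)).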
inline
void TransverseMercatorProjector::mapForward(float x, float y, float &u, float &v)
{
float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float u_ = atan2f(x_, z_);
float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
float B = cosf(v_) * sinf(u_);
u = scale / 2 * logf( (1+B) / (1-B) );
v = scale * atan2f(tanf(v_), cosf(u_));
}
inline
void TransverseMercatorProjector::mapBackward(float u, float v, float &x, float &y)
{
u /= scale;
v /= scale;
float v_ = asinf( sinf(v) / coshf(u) );
float u_ = atan2f( sinhf(u), std::cos(v) );
float cosv = cosf(v_);
float x_ = cosv * sinf(u_);
float y_ = sinf(v_);
float z_ = cosv * cosf(u_);
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
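// The *Portrait projectors below reuse the landscape formulas after swapping
// the x and y camera axes (x_ <-> y_) and negating u, which rotates the
// projection by 90 degrees for portrait-oriented panoramas.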
inline
void SphericalPortraitProjector::mapForward(float x, float y, float &u0, float &v0)
{
float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float x_ = y0_;
float y_ = x0_;
float u, v;
u = scale * atan2f(x_, z_);
v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));
u0 = -u;
v0 = v;
}
inline
void SphericalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
{
float u, v;
u = -u0;
v = v0;
u /= scale;
v /= scale;
float sinv = sinf(static_cast<float>(CV_PI) - v);
float x0_ = sinv * sinf(u);
float y0_ = cosf(static_cast<float>(CV_PI) - v);
float z_ = sinv * cosf(u);
float x_ = y0_;
float y_ = x0_;
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void CylindricalPortraitProjector::mapForward(float x, float y, float &u0, float &v0)
{
float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float x_ = y0_;
float y_ = x0_;
float u, v;
u = scale * atan2f(x_, z_);
v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
u0 = -u;
v0 = v;
}
inline
void CylindricalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
{
float u, v;
u = -u0;
v = v0;
u /= scale;
v /= scale;
float x0_ = sinf(u);
float y0_ = v;
float z_ = cosf(u);
float x_ = y0_;
float y_ = x0_;
float z;
x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
inline
void PlanePortraitProjector::mapForward(float x, float y, float &u0, float &v0)
{
float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
float x_ = y0_;
float y_ = x0_;
x_ = t[0] + x_ / z_ * (1 - t[2]);
y_ = t[1] + y_ / z_ * (1 - t[2]);
float u,v;
u = scale * x_;
v = scale * y_;
u0 = -u;
v0 = v;
}
inline
void PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
{
float u, v;
u = -u0;
v = v0;
u = u / scale - t[0];
v = v / scale - t[1];
float z;
x = k_rinv[0] * v + k_rinv[1] * u + k_rinv[2] * (1 - t[2]);
y = k_rinv[3] * v + k_rinv[4] * u + k_rinv[5] * (1 - t[2]);
z = k_rinv[6] * v + k_rinv[7] * u + k_rinv[8] * (1 - t[2]);
x /= z;
y /= z;
}
} // namespace detail
} // namespace cv
//! @endcond
#endif // OPENCV_STITCHING_WARPERS_INL_HPP

View File

@@ -0,0 +1,277 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_STITCHING_WARPER_CREATORS_HPP
#define OPENCV_STITCHING_WARPER_CREATORS_HPP
#include "opencv2/stitching/detail/warpers.hpp"
#include <string>
namespace cv {
class CV_EXPORTS_W PyRotationWarper
{
Ptr<detail::RotationWarper> rw;
public:
CV_WRAP PyRotationWarper(String type, float scale);
CV_WRAP PyRotationWarper() {}
~PyRotationWarper() {}
/** @brief Projects the image point.
@param pt Source point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected point
*/
CV_WRAP Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R);
/** @brief Projects the image point backward.
@param pt Projected point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Backward-projected point
*/
#if CV_VERSION_MAJOR == 4
CV_WRAP Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
{
CV_UNUSED(pt); CV_UNUSED(K); CV_UNUSED(R);
CV_Error(Error::StsNotImplemented, "");
}
#else
CV_WRAP Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray R);
#endif
/** @brief Builds the projection maps according to the given camera data.
@param src_size Source image size
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
@param ymap Projection map for the y axis
@return Projected image minimum bounding box
*/
CV_WRAP Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
/** @brief Projects the image.
@param src Source image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst Projected image
@return Project image top-left corner
*/
CV_WRAP Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
CV_OUT OutputArray dst);
/** @brief Projects the image backward.
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp_mode Interpolation mode
@param border_mode Border extrapolation mode
@param dst_size Backward-projected image size
@param dst Backward-projected image
*/
CV_WRAP void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, CV_OUT OutputArray dst);
/**
@param src_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
*/
CV_WRAP Rect warpRoi(Size src_size, InputArray K, InputArray R);
CV_WRAP float getScale() const { return 1.f; }
CV_WRAP void setScale(float) {}
};
//! @addtogroup stitching_warp
//! @{
/** @brief Image warper factories base class.
*/
class CV_EXPORTS_W WarperCreator
{
public:
CV_WRAP virtual ~WarperCreator() {}
virtual Ptr<detail::RotationWarper> create(float scale) const = 0;
};
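// Minimal usage sketch: choose a factory, then create the concrete rotation
// warper for a given scale (in the stitching pipeline the scale is typically
// the estimated focal length in pixels), e.g.
//   Ptr<WarperCreator> creator = makePtr<SphericalWarper>();
//   Ptr<detail::RotationWarper> warper = creator->create(1000.f);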
/** @brief Plane warper factory class.
@sa detail::PlaneWarper
*/
class CV_EXPORTS PlaneWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PlaneWarper>(scale); }
};
/** @brief Affine warper factory class.
@sa detail::AffineWarper
*/
class CV_EXPORTS AffineWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::AffineWarper>(scale); }
};
/** @brief Cylindrical warper factory class.
@sa detail::CylindricalWarper
*/
class CV_EXPORTS CylindricalWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CylindricalWarper>(scale); }
};
/** @brief Spherical warper factory class */
class CV_EXPORTS SphericalWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::SphericalWarper>(scale); }
};
class CV_EXPORTS FisheyeWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::FisheyeWarper>(scale); }
};
class CV_EXPORTS StereographicWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::StereographicWarper>(scale); }
};
class CV_EXPORTS CompressedRectilinearWarper: public WarperCreator
{
float a, b;
public:
CompressedRectilinearWarper(float A = 1, float B = 1)
{
a = A; b = B;
}
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CompressedRectilinearWarper>(scale, a, b); }
};
class CV_EXPORTS CompressedRectilinearPortraitWarper: public WarperCreator
{
float a, b;
public:
CompressedRectilinearPortraitWarper(float A = 1, float B = 1)
{
a = A; b = B;
}
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CompressedRectilinearPortraitWarper>(scale, a, b); }
};
class CV_EXPORTS PaniniWarper: public WarperCreator
{
float a, b;
public:
PaniniWarper(float A = 1, float B = 1)
{
a = A; b = B;
}
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PaniniWarper>(scale, a, b); }
};
class CV_EXPORTS PaniniPortraitWarper: public WarperCreator
{
float a, b;
public:
PaniniPortraitWarper(float A = 1, float B = 1)
{
a = A; b = B;
}
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PaniniPortraitWarper>(scale, a, b); }
};
class CV_EXPORTS MercatorWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::MercatorWarper>(scale); }
};
class CV_EXPORTS TransverseMercatorWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::TransverseMercatorWarper>(scale); }
};
#ifdef HAVE_OPENCV_CUDAWARPING
class PlaneWarperGpu: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::PlaneWarperGpu>(scale); }
};
class CylindricalWarperGpu: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::CylindricalWarperGpu>(scale); }
};
class SphericalWarperGpu: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const CV_OVERRIDE { return makePtr<detail::SphericalWarperGpu>(scale); }
};
#endif
//! @} stitching_warp
} // namespace cv
#endif // OPENCV_STITCHING_WARPER_CREATORS_HPP

View File

@@ -0,0 +1,49 @@
#ifdef HAVE_OPENCV_STITCHING
typedef Stitcher::Status Status;
typedef Stitcher::Mode Mode;
typedef std::vector<detail::ImageFeatures> vector_ImageFeatures;
typedef std::vector<detail::MatchesInfo> vector_MatchesInfo;
typedef std::vector<detail::CameraParams> vector_CameraParams;
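// These specializations route std::vector<> of the stitching detail:: types
// through the generic vector converters, so the Python bindings can pass
// lists of features, matches and camera parameters across the boundary.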
template<> struct pyopencvVecConverter<detail::ImageFeatures>
{
static bool to(PyObject* obj, std::vector<detail::ImageFeatures>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<detail::ImageFeatures>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<> struct pyopencvVecConverter<detail::MatchesInfo>
{
static bool to(PyObject* obj, std::vector<detail::MatchesInfo>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<detail::MatchesInfo>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<> struct pyopencvVecConverter<detail::CameraParams>
{
static bool to(PyObject* obj, std::vector<detail::CameraParams>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<detail::CameraParams>& value)
{
return pyopencv_from_generic_vec(value);
}
};
#endif

View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python
import cv2 as cv
from tests_common import NewOpenCVTests
class stitching_test(NewOpenCVTests):
def test_simple(self):
img1 = self.get_sample('stitching/a1.png')
img2 = self.get_sample('stitching/a2.png')
stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
(_result, pano) = stitcher.stitch((img1, img2))
#cv.imshow("pano", pano)
#cv.waitKey()
self.assertAlmostEqual(pano.shape[0], 685, delta=100, msg="rows: %r" % list(pano.shape))
self.assertAlmostEqual(pano.shape[1], 1025, delta=100, msg="cols: %r" % list(pano.shape))
class stitching_detail_test(NewOpenCVTests):
def test_simple(self):
img = self.get_sample('stitching/a1.png')
finder = cv.ORB.create()
imgFea = cv.detail.computeImageFeatures2(finder, img)
self.assertIsNotNone(imgFea)
matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
self.assertIsNotNone(matcher)
matcher = cv.detail_AffineBestOf2NearestMatcher(False, False, 0.3)
self.assertIsNotNone(matcher)
matcher = cv.detail_BestOf2NearestRangeMatcher(2, False, 0.3)
self.assertIsNotNone(matcher)
estimator = cv.detail_AffineBasedEstimator()
self.assertIsNotNone(estimator)
estimator = cv.detail_HomographyBasedEstimator()
self.assertIsNotNone(estimator)
adjuster = cv.detail_BundleAdjusterReproj()
self.assertIsNotNone(adjuster)
adjuster = cv.detail_BundleAdjusterRay()
self.assertIsNotNone(adjuster)
adjuster = cv.detail_BundleAdjusterAffinePartial()
self.assertIsNotNone(adjuster)
adjuster = cv.detail_NoBundleAdjuster()
self.assertIsNotNone(adjuster)
compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_NO)
self.assertIsNotNone(compensator)
compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN)
self.assertIsNotNone(compensator)
compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN_BLOCKS)
self.assertIsNotNone(compensator)
seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_DpSeamFinder("COLOR")
self.assertIsNotNone(seam_finder)
seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
self.assertIsNotNone(seam_finder)
blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
self.assertIsNotNone(blender)
blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
self.assertIsNotNone(blender)
blender = cv.detail.Blender_createDefault(cv.detail.Blender_MULTI_BAND)
self.assertIsNotNone(blender)
timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_AS_IS)
self.assertIsNotNone(timelapser)
timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_CROP)
self.assertIsNotNone(timelapser)
class stitching_compose_panorama_test_no_args(NewOpenCVTests):
def test_simple(self):
img1 = self.get_sample('stitching/a1.png')
img2 = self.get_sample('stitching/a2.png')
stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
stitcher.estimateTransform((img1, img2))
result, _ = stitcher.composePanorama()
assert result == 0
class stitching_compose_panorama_args(NewOpenCVTests):
def test_simple(self):
img1 = self.get_sample('stitching/a1.png')
img2 = self.get_sample('stitching/a2.png')
stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
stitcher.estimateTransform((img1, img2))
result, _ = stitcher.composePanorama((img1, img2))
assert result == 0
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@@ -0,0 +1,147 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
using namespace perf;
namespace ocl {
#define SURF_MATCH_CONFIDENCE 0.65f
#define ORB_MATCH_CONFIDENCE 0.3f
#define WORK_MEGAPIX 0.6
typedef TestBaseWithParam<string> stitch;
#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
#define TEST_DETECTORS testing::Values("surf", "orb", "akaze")
#else
#define TEST_DETECTORS testing::Values("orb", "akaze")
#endif
OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS)
{
UMat pano;
vector<Mat> _imgs;
_imgs.push_back( imread( getDataPath("stitching/a1.png") ) );
_imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
_imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
vector<UMat> imgs = ToUMat(_imgs);
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.iterations(20);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, 1182, 50);
EXPECT_NEAR(pano.size().height, 682, 30);
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(stitch, b12, TEST_DETECTORS)
{
UMat pano;
vector<Mat> imgs;
imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.iterations(20);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, 1124, GetParam() == "surf" ? 100 : 50);
EXPECT_NEAR(pano.size().height, 644, GetParam() == "surf" ? 60 : 30);
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(stitch, boat, TEST_DETECTORS)
{
Size expected_dst_size(10789, 2663);
checkDeviceMaxMemoryAllocSize(expected_dst_size, CV_16SC3, 4);
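// 32-bit Windows OpenCL builds are skipped below, most likely because the
// ~10789x2663 CV_16SC3 panorama exceeds what a 32-bit process can allocate.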
#if defined(_WIN32) && !defined(_WIN64)
if (cv::ocl::useOpenCL())
throw ::perf::TestBase::PerfSkipTestException();
#endif
UMat pano;
vector<Mat> _imgs;
_imgs.push_back( imread( getDataPath("stitching/boat1.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat2.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat3.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat4.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat5.jpg") ) );
_imgs.push_back( imread( getDataPath("stitching/boat6.jpg") ) );
vector<UMat> imgs = ToUMat(_imgs);
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.iterations(20);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, expected_dst_size.width, 200);
EXPECT_NEAR(pano.size().height, expected_dst_size.height, 100);
SANITY_CHECK_NOTHING();
}
} } // namespace opencv_test::ocl
#endif // HAVE_OPENCL

View File

@@ -0,0 +1,162 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../perf_precomp.hpp"
#include "opencv2/stitching/warpers.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
///////////////////////// Stitching Warpers ///////////////////////////
enum
{
SphericalWarperType = 0,
CylindricalWarperType = 1,
PlaneWarperType = 2,
AffineWarperType = 3,
};
class WarperBase
{
public:
explicit WarperBase(int type, Size srcSize)
{
Ptr<WarperCreator> creator;
if (type == SphericalWarperType)
creator = makePtr<SphericalWarper>();
else if (type == CylindricalWarperType)
creator = makePtr<CylindricalWarper>();
else if (type == PlaneWarperType)
creator = makePtr<PlaneWarper>();
else if (type == AffineWarperType)
creator = makePtr<AffineWarper>();
CV_Assert(!creator.empty());
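// Synthetic pinhole intrinsics: fx/fy equal to the source width/height,
// principal point at the image centre; R below stays the identity.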
K = Mat::eye(3, 3, CV_32FC1);
K.at<float>(0,0) = (float)srcSize.width;
K.at<float>(0,2) = (float)srcSize.width/2;
K.at<float>(1,1) = (float)srcSize.height;
K.at<float>(1,2) = (float)srcSize.height/2;
K.at<float>(2,2) = 1.0f;
R = Mat::eye(3, 3, CV_32FC1);
float scale = (float)srcSize.width;
warper = creator->create(scale);
}
Rect buildMaps(Size src_size, OutputArray xmap, OutputArray ymap) const
{
return warper->buildMaps(src_size, K, R, xmap, ymap);
}
Point warp(InputArray src, int interp_mode, int border_mode, OutputArray dst) const
{
return warper->warp(src, K, R, interp_mode, border_mode, dst);
}
private:
Ptr<detail::RotationWarper> warper;
Mat K, R;
};
CV_ENUM(WarperType, SphericalWarperType, CylindricalWarperType, PlaneWarperType, AffineWarperType)
typedef tuple<Size, WarperType> StitchingWarpersParams;
typedef TestBaseWithParam<StitchingWarpersParams> StitchingWarpersFixture;
static void prepareWarperSrc(InputOutputArray src, Size srcSize)
{
src.create(srcSize, CV_8UC1);
src.setTo(Scalar::all(64));
ellipse(src, Point(srcSize.width/2, srcSize.height/2), Size(srcSize.width/2, srcSize.height/2),
360, 0, 360, Scalar::all(255), 2);
ellipse(src, Point(srcSize.width/2, srcSize.height/2), Size(srcSize.width/3, srcSize.height/3),
360, 0, 360, Scalar::all(128), 2);
rectangle(src, Point(10, 10), Point(srcSize.width - 10, srcSize.height - 10), Scalar::all(128), 2);
}
OCL_PERF_TEST_P(StitchingWarpersFixture, StitchingWarpers_BuildMaps,
::testing::Combine(OCL_TEST_SIZES, WarperType::all()))
{
const StitchingWarpersParams params = GetParam();
const Size srcSize = get<0>(params);
const WarperBase warper(get<1>(params), srcSize);
UMat xmap, ymap;
OCL_TEST_CYCLE() warper.buildMaps(srcSize, xmap, ymap);
SANITY_CHECK(xmap, 1e-3);
SANITY_CHECK(ymap, 1e-3);
}
OCL_PERF_TEST_P(StitchingWarpersFixture, StitchingWarpers_Warp,
::testing::Combine(OCL_TEST_SIZES, WarperType::all()))
{
const StitchingWarpersParams params = GetParam();
const Size srcSize = get<0>(params);
const WarperBase warper(get<1>(params), srcSize);
UMat src, dst;
prepareWarperSrc(src, srcSize);
declare.in(src, WARMUP_READ);
OCL_TEST_CYCLE() warper.warp(src, INTER_LINEAR, BORDER_REPLICATE, dst);
#if 0
namedWindow("src", WINDOW_NORMAL);
namedWindow("dst", WINDOW_NORMAL);
imshow("src", src);
imshow("dst", dst);
std::cout << dst.size() << " " << dst.size().area() << std::endl;
cv::waitKey();
#endif
SANITY_CHECK(dst, 1e-5);
}
} } // namespace opencv_test::ocl
#endif // HAVE_OPENCL

View File

@@ -0,0 +1,96 @@
#include "perf_precomp.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/opencv_modules.hpp"
namespace opencv_test
{
using namespace perf;
typedef TestBaseWithParam<tuple<string, string> > bundleAdjuster;
#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
#define TEST_DETECTORS testing::Values("surf", "orb")
#else
#define TEST_DETECTORS testing::Values<string>("orb")
#endif
#define WORK_MEGAPIX 0.6
#define AFFINE_FUNCTIONS testing::Values("affinePartial", "affine")
PERF_TEST_P(bundleAdjuster, affine, testing::Combine(TEST_DETECTORS, AFFINE_FUNCTIONS))
{
Mat img1, img1_full = imread(getDataPath("stitching/s1.jpg"));
Mat img2, img2_full = imread(getDataPath("stitching/s2.jpg"));
float scale1 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img1_full.total()));
float scale2 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img2_full.total()));
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
string detector = get<0>(GetParam());
string affine_fun = get<1>(GetParam());
Ptr<Feature2D> finder = getFeatureFinder(detector);
Ptr<detail::FeaturesMatcher> matcher;
Ptr<detail::BundleAdjusterBase> bundle_adjuster;
if (affine_fun == "affinePartial")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false);
bundle_adjuster = makePtr<detail::BundleAdjusterAffinePartial>();
}
else if (affine_fun == "affine")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(true);
bundle_adjuster = makePtr<detail::BundleAdjusterAffine>();
}
Ptr<detail::Estimator> estimator = makePtr<detail::AffineBasedEstimator>();
std::vector<Mat> images;
images.push_back(img1), images.push_back(img2);
std::vector<detail::ImageFeatures> features;
std::vector<detail::MatchesInfo> pairwise_matches;
std::vector<detail::CameraParams> cameras;
std::vector<detail::CameraParams> cameras2;
computeImageFeatures(finder, images, features);
(*matcher)(features, pairwise_matches);
if (!(*estimator)(features, pairwise_matches, cameras))
FAIL() << "estimation failed. this should never happen.";
// the bundle adjusters currently require the rotations as CV_32F
for (size_t i = 0; i < cameras.size(); ++i)
{
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
}
cameras2 = cameras;
bool success = true;
while(next())
{
cameras = cameras2; // revert cameras back to original initial guess
startTimer();
success = (*bundle_adjuster)(features, pairwise_matches, cameras);
stopTimer();
}
EXPECT_TRUE(success);
EXPECT_TRUE(cameras.size() == 2);
// first camera should be just identity
Mat &first = cameras[0].R;
SANITY_CHECK(first, 1e-3, ERROR_ABSOLUTE);
// second camera should be the estimated transform between images
// separate rotation and translation in transform matrix
Mat T_second (cameras[1].R, Range(0, 2), Range(2, 3));
Mat R_second (cameras[1].R, Range(0, 2), Range(0, 2));
Mat h (cameras[1].R, Range(2, 3), Range::all());
SANITY_CHECK(T_second, 5, ERROR_ABSOLUTE); // allow 5 pixels diff in translations
SANITY_CHECK(R_second, .01, ERROR_ABSOLUTE); // rotations must be more precise
// last row should be precisely (0, 0, 1) as it is just added for representation in homogeneous
// coordinates
EXPECT_TRUE(h.type() == CV_32F);
EXPECT_FLOAT_EQ(h.at<float>(0), 0.f);
EXPECT_FLOAT_EQ(h.at<float>(1), 0.f);
EXPECT_FLOAT_EQ(h.at<float>(2), 1.f);
}
} // namespace

View File

@@ -0,0 +1,7 @@
#include "perf_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_PERF_TEST_MAIN(stitching)

View File

@@ -0,0 +1,293 @@
#include "perf_precomp.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/opencv_modules.hpp"
#include "opencv2/flann.hpp"
namespace opencv_test
{
using namespace perf;
typedef TestBaseWithParam<size_t> FeaturesFinderVec;
typedef TestBaseWithParam<string> match;
typedef tuple<string, int> matchVector_t;
typedef TestBaseWithParam<matchVector_t> matchVector;
#define NUMBER_IMAGES testing::Values(1, 5, 20)
#define SURF_MATCH_CONFIDENCE 0.65f
#define ORB_MATCH_CONFIDENCE 0.3f
#define WORK_MEGAPIX 0.6
#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
#define TEST_DETECTORS testing::Values("surf", "orb")
#else
#define TEST_DETECTORS testing::Values<string>("orb")
#endif
PERF_TEST_P(FeaturesFinderVec, ParallelFeaturesFinder, NUMBER_IMAGES)
{
Mat img = imread( getDataPath("stitching/a1.png") );
vector<Mat> imgs(GetParam(), img);
vector<detail::ImageFeatures> features(imgs.size());
Ptr<Feature2D> finder = ORB::create();
TEST_CYCLE()
{
detail::computeImageFeatures(finder, imgs, features);
}
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(FeaturesFinderVec, SerialFeaturesFinder, NUMBER_IMAGES)
{
Mat img = imread( getDataPath("stitching/a1.png") );
vector<Mat> imgs(GetParam(), img);
vector<detail::ImageFeatures> features(imgs.size());
Ptr<Feature2D> finder = ORB::create();
TEST_CYCLE()
{
for (size_t i = 0; i < imgs.size(); ++i)
detail::computeImageFeatures(finder, imgs[i], features[i]);
}
SANITY_CHECK_NOTHING();
}
PERF_TEST_P( match, bestOf2Nearest, TEST_DETECTORS)
{
Mat img1, img1_full = imread( getDataPath("stitching/boat1.jpg") );
Mat img2, img2_full = imread( getDataPath("stitching/boat2.jpg") );
float scale1 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img1_full.total()));
float scale2 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img2_full.total()));
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
Ptr<Feature2D> finder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> matcher;
if (GetParam() == "surf")
{
matcher = makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
}
else if (GetParam() == "orb")
{
matcher = makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE);
}
else
{
FAIL() << "Unknown 2D features type: " << GetParam();
}
detail::ImageFeatures features1, features2;
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
detail::MatchesInfo pairwise_matches;
declare.in(features1.descriptors, features2.descriptors);
while(next())
{
cvflann::seed_random(42); // fixed seed keeps FlannBasedMatcher results reproducible
startTimer();
(*matcher)(features1, features2, pairwise_matches);
stopTimer();
matcher->collectGarbage();
}
Mat dist (pairwise_matches.H, Range::all(), Range(2, 3));
Mat R (pairwise_matches.H, Range::all(), Range(0, 2));
// separate transform matrix, use lower error on rotations
SANITY_CHECK(dist, 3., ERROR_ABSOLUTE);
SANITY_CHECK(R, .06, ERROR_ABSOLUTE);
}
PERF_TEST_P( matchVector, bestOf2NearestVectorFeatures, testing::Combine(
TEST_DETECTORS,
testing::Values(2, 4, 8))
)
{
Mat img1, img1_full = imread( getDataPath("stitching/boat1.jpg") );
Mat img2, img2_full = imread( getDataPath("stitching/boat2.jpg") );
float scale1 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img1_full.total()));
float scale2 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img2_full.total()));
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
string detectorName = get<0>(GetParam());
int featuresVectorSize = get<1>(GetParam());
Ptr<Feature2D> finder = getFeatureFinder(detectorName);
Ptr<detail::FeaturesMatcher> matcher;
if (detectorName == "surf")
{
matcher = makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
}
else if (detectorName == "orb")
{
matcher = makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE);
}
else
{
FAIL() << "Unknown 2D features type: " << get<0>(GetParam());
}
detail::ImageFeatures features1, features2;
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
vector<detail::ImageFeatures> features;
vector<detail::MatchesInfo> pairwise_matches;
for(int i = 0; i < featuresVectorSize/2; i++)
{
features.push_back(features1);
features.push_back(features2);
}
declare.time(200);
while(next())
{
cvflann::seed_random(42); // fixed seed keeps FlannBasedMatcher results reproducible
startTimer();
(*matcher)(features, pairwise_matches);
stopTimer();
matcher->collectGarbage();
}
size_t matches_count = 0;
for (size_t i = 0; i < pairwise_matches.size(); ++i)
{
if (pairwise_matches[i].src_img_idx < 0)
continue;
EXPECT_GT(pairwise_matches[i].matches.size(), 95u);
EXPECT_FALSE(pairwise_matches[i].H.empty());
++matches_count;
}
EXPECT_GT(matches_count, 0u);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P( match, affineBestOf2Nearest, TEST_DETECTORS)
{
Mat img1, img1_full = imread( getDataPath("stitching/s1.jpg") );
Mat img2, img2_full = imread( getDataPath("stitching/s2.jpg") );
float scale1 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img1_full.total()));
float scale2 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img2_full.total()));
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
Ptr<Feature2D> finder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> matcher;
if (GetParam() == "surf")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, SURF_MATCH_CONFIDENCE);
}
else if (GetParam() == "orb")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, ORB_MATCH_CONFIDENCE);
}
else
{
FAIL() << "Unknown 2D features type: " << GetParam();
}
detail::ImageFeatures features1, features2;
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
detail::MatchesInfo pairwise_matches;
declare.in(features1.descriptors, features2.descriptors);
while(next())
{
cvflann::seed_random(42); // fixed seed keeps FlannBasedMatcher results reproducible
startTimer();
(*matcher)(features1, features2, pairwise_matches);
stopTimer();
matcher->collectGarbage();
}
// separate rotation and translation in transform matrix
Mat T (pairwise_matches.H, Range(0, 2), Range(2, 3));
Mat R (pairwise_matches.H, Range(0, 2), Range(0, 2));
Mat h (pairwise_matches.H, Range(2, 3), Range::all());
SANITY_CHECK(T, 5, ERROR_ABSOLUTE); // allow 5 pixels diff in translations
SANITY_CHECK(R, .01, ERROR_ABSOLUTE); // rotations must be more precise
// last row should be precisely (0, 0, 1) as it is just added for representation in homogeneous
// coordinates
EXPECT_DOUBLE_EQ(h.at<double>(0), 0.);
EXPECT_DOUBLE_EQ(h.at<double>(1), 0.);
EXPECT_DOUBLE_EQ(h.at<double>(2), 1.);
}
PERF_TEST_P( matchVector, affineBestOf2NearestVectorFeatures, testing::Combine(
TEST_DETECTORS,
testing::Values(2, 4, 8))
)
{
Mat img1, img1_full = imread( getDataPath("stitching/s1.jpg") );
Mat img2, img2_full = imread( getDataPath("stitching/s2.jpg") );
float scale1 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img1_full.total()));
float scale2 = (float)std::min(1.0, sqrt(WORK_MEGAPIX * 1e6 / img2_full.total()));
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
string detectorName = get<0>(GetParam());
int featuresVectorSize = get<1>(GetParam());
Ptr<Feature2D> finder = getFeatureFinder(detectorName);
Ptr<detail::FeaturesMatcher> matcher;
if (detectorName == "surf")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, SURF_MATCH_CONFIDENCE);
}
else if (detectorName == "orb")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, ORB_MATCH_CONFIDENCE);
}
else
{
FAIL() << "Unknown 2D features type: " << get<0>(GetParam());
}
detail::ImageFeatures features1, features2;
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
vector<detail::ImageFeatures> features;
vector<detail::MatchesInfo> pairwise_matches;
for(int i = 0; i < featuresVectorSize/2; i++)
{
features.push_back(features1);
features.push_back(features2);
}
declare.time(200);
while(next())
{
cvflann::seed_random(42); // fixed seed keeps FlannBasedMatcher results reproducible
startTimer();
(*matcher)(features, pairwise_matches);
stopTimer();
matcher->collectGarbage();
}
size_t matches_count = 0;
for (size_t i = 0; i < pairwise_matches.size(); ++i)
{
if (pairwise_matches[i].src_img_idx < 0)
continue;
EXPECT_GT(pairwise_matches[i].matches.size(), 150u);
EXPECT_FALSE(pairwise_matches[i].H.empty());
++matches_count;
}
EXPECT_GT(matches_count, 0u);
SANITY_CHECK_NOTHING();
}
} // namespace

View File

@@ -0,0 +1,30 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/stitching.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
namespace cv
{
static inline Ptr<Feature2D> getFeatureFinder(const std::string& name)
{
if (name == "orb")
return ORB::create();
#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
else if (name == "surf")
return xfeatures2d::SURF::create();
#endif
else if (name == "akaze")
return AKAZE::create();
else
return Ptr<Feature2D>();
}
} // namespace cv
#endif

View File

@@ -0,0 +1,253 @@
#include "perf_precomp.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core/ocl.hpp"
namespace opencv_test
{
using namespace perf;
#define SURF_MATCH_CONFIDENCE 0.65f
#define ORB_MATCH_CONFIDENCE 0.3f
#define WORK_MEGAPIX 0.6
typedef TestBaseWithParam<string> stitch;
typedef TestBaseWithParam<int> stitchExposureCompensation;
typedef TestBaseWithParam<tuple<string, string> > stitchDatasets;
typedef TestBaseWithParam<tuple<string, int>> stitchExposureCompMultiFeed;
#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
#define TEST_DETECTORS testing::Values("surf", "orb", "akaze")
#else
#define TEST_DETECTORS testing::Values("orb", "akaze")
#endif
#define TEST_EXP_COMP_BS testing::Values(32, 16, 12, 10, 8)
#define TEST_EXP_COMP_NR_FEED testing::Values(1, 2, 3, 4, 5)
#define TEST_EXP_COMP_MODE testing::Values("gain", "channels", "blocks_gain", "blocks_channels")
#define AFFINE_DATASETS testing::Values("s", "budapest", "newspaper", "prague")
PERF_TEST_P(stitch, a123, TEST_DETECTORS)
{
Mat pano;
vector<Mat> imgs;
imgs.push_back( imread( getDataPath("stitching/a1.png") ) );
imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.time(30 * 20).iterations(20);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, 1182, 50);
EXPECT_NEAR(pano.size().height, 682, 30);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(stitchExposureCompensation, a123, TEST_EXP_COMP_BS)
{
Mat pano;
vector<Mat> imgs;
imgs.push_back( imread( getDataPath("stitching/a1.png") ) );
imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
int bs = GetParam();
declare.time(30 * 10).iterations(10);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
stitcher->setExposureCompensator(
makePtr<detail::BlocksGainCompensator>(bs, bs));
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, 1182, 50);
EXPECT_NEAR(pano.size().height, 682, 30);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(stitchExposureCompMultiFeed, a123, testing::Combine(TEST_EXP_COMP_MODE, TEST_EXP_COMP_NR_FEED))
{
const int block_size = 32;
Mat pano;
vector<Mat> imgs;
imgs.push_back( imread( getDataPath("stitching/a1.png") ) );
imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
string mode = get<0>(GetParam());
int nr_feeds = get<1>(GetParam());
declare.time(30 * 10).iterations(10);
Ptr<detail::ExposureCompensator> exp_comp;
if (mode == "gain")
exp_comp = makePtr<detail::GainCompensator>(nr_feeds);
else if (mode == "channels")
exp_comp = makePtr<detail::ChannelsCompensator>(nr_feeds);
else if (mode == "blocks_gain")
exp_comp = makePtr<detail::BlocksGainCompensator>(block_size, block_size, nr_feeds);
else if (mode == "blocks_channels")
exp_comp = makePtr<detail::BlocksChannelsCompensator>(block_size, block_size, nr_feeds);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
stitcher->setExposureCompensator(exp_comp);
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, 1182, 50);
EXPECT_NEAR(pano.size().height, 682, 30);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(stitch, b12, TEST_DETECTORS)
{
Mat pano;
vector<Mat> imgs;
imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.time(30 * 20).iterations(20);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, 1117, GetParam() == "surf" ? 100 : 50);
EXPECT_NEAR(pano.size().height, 642, GetParam() == "surf" ? 60 : 30);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(stitchDatasets, affine, testing::Combine(AFFINE_DATASETS, TEST_DETECTORS))
{
string dataset = get<0>(GetParam());
string detector = get<1>(GetParam());
Mat pano;
vector<Mat> imgs;
int width, height, allowed_diff = 20;
Ptr<Feature2D> featuresFinder = getFeatureFinder(detector);
if(dataset == "budapest")
{
imgs.push_back(imread(getDataPath("stitching/budapest1.jpg")));
imgs.push_back(imread(getDataPath("stitching/budapest2.jpg")));
imgs.push_back(imread(getDataPath("stitching/budapest3.jpg")));
imgs.push_back(imread(getDataPath("stitching/budapest4.jpg")));
imgs.push_back(imread(getDataPath("stitching/budapest5.jpg")));
imgs.push_back(imread(getDataPath("stitching/budapest6.jpg")));
width = 2313;
height = 1158;
// this dataset is big, the results between surf and orb differ slightly,
// but both are still good
allowed_diff = 50;
// we need to boost ORB number of features to be able to stitch this dataset
// SURF works just fine with default settings
if(detector == "orb")
featuresFinder = ORB::create(1500);
}
else if (dataset == "newspaper")
{
imgs.push_back(imread(getDataPath("stitching/newspaper1.jpg")));
imgs.push_back(imread(getDataPath("stitching/newspaper2.jpg")));
imgs.push_back(imread(getDataPath("stitching/newspaper3.jpg")));
imgs.push_back(imread(getDataPath("stitching/newspaper4.jpg")));
width = 1791;
height = 1136;
// we need to boost ORB number of features to be able to stitch this dataset
// SURF works just fine with default settings
if(detector == "orb")
featuresFinder = ORB::create(3000);
}
else if (dataset == "prague")
{
imgs.push_back(imread(getDataPath("stitching/prague1.jpg")));
imgs.push_back(imread(getDataPath("stitching/prague2.jpg")));
width = 983;
height = 1759;
}
else // dataset == "s"
{
imgs.push_back(imread(getDataPath("stitching/s1.jpg")));
imgs.push_back(imread(getDataPath("stitching/s2.jpg")));
width = 1815;
height = 700;
}
declare.time(30 * 20).iterations(20);
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::SCANS);
stitcher->setFeaturesFinder(featuresFinder);
if (cv::ocl::useOpenCL())
cv::theRNG() = cv::RNG(12345); // fixed seed prevents failures of Windows OpenCL builds (see #8294)
startTimer();
stitcher->stitch(imgs, pano);
stopTimer();
}
EXPECT_NEAR(pano.size().width, width, allowed_diff);
EXPECT_NEAR(pano.size().height, height, allowed_diff);
SANITY_CHECK_NOTHING();
}
} // namespace

View File

@@ -0,0 +1,194 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/core/hal/hal.hpp"
using namespace cv;
namespace {
static inline bool decomposeCholesky(double* A, size_t astep, int m)
{
return hal::Cholesky64f(A, astep, m, 0, 0, 0);
}
} // namespace
namespace cv {
namespace detail {
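// Estimates both focal lengths from a single homography under a purely
// rotating camera model (H ~ K1 * R * K0^-1). For each focal, two candidate
// expressions are evaluated and the one with the larger denominator is
// preferred as numerically more reliable.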
void focalsFromHomography(const Mat& H, double &f0, double &f1, bool &f0_ok, bool &f1_ok)
{
CV_Assert(H.type() == CV_64F && H.size() == Size(3, 3));
const double* h = H.ptr<double>();
double d1, d2; // Denominators
double v1, v2; // Focal squares value candidates
f1_ok = true;
d1 = h[6] * h[7];
d2 = (h[7] - h[6]) * (h[7] + h[6]);
v1 = -(h[0] * h[1] + h[3] * h[4]) / d1;
v2 = (h[0] * h[0] + h[3] * h[3] - h[1] * h[1] - h[4] * h[4]) / d2;
if (v1 < v2) std::swap(v1, v2);
if (v1 > 0 && v2 > 0) f1 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
else if (v1 > 0) f1 = std::sqrt(v1);
else f1_ok = false;
f0_ok = true;
d1 = h[0] * h[3] + h[1] * h[4];
d2 = h[0] * h[0] + h[1] * h[1] - h[3] * h[3] - h[4] * h[4];
v1 = -h[2] * h[5] / d1;
v2 = (h[5] * h[5] - h[2] * h[2]) / d2;
if (v1 < v2) std::swap(v1, v2);
if (v1 > 0 && v2 > 0) f0 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
else if (v1 > 0) f0 = std::sqrt(v1);
else f0_ok = false;
}
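// estimateFocal aggregates the pairwise estimates: the geometric mean
// sqrt(f0 * f1) of every usable pair is collected and the median is used for
// all cameras; with too few usable homographies it falls back to a naive
// guess, the mean of (width + height) over all images.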
void estimateFocal(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
std::vector<double> &focals)
{
const int num_images = static_cast<int>(features.size());
focals.resize(num_images);
std::vector<double> all_focals;
for (int i = 0; i < num_images; ++i)
{
for (int j = 0; j < num_images; ++j)
{
const MatchesInfo &m = pairwise_matches[i*num_images + j];
if (m.H.empty())
continue;
double f0, f1;
bool f0ok, f1ok;
focalsFromHomography(m.H, f0, f1, f0ok, f1ok);
if (f0ok && f1ok)
all_focals.push_back(std::sqrt(f0 * f1));
}
}
if (static_cast<int>(all_focals.size()) >= num_images - 1)
{
double median;
std::sort(all_focals.begin(), all_focals.end());
if (all_focals.size() % 2 == 1)
median = all_focals[all_focals.size() / 2];
else
median = (all_focals[all_focals.size() / 2 - 1] + all_focals[all_focals.size() / 2]) * 0.5;
for (int i = 0; i < num_images; ++i)
focals[i] = median;
}
else
{
LOGLN("Can't estimate focal length, will use naive approach");
double focals_sum = 0;
for (int i = 0; i < num_images; ++i)
focals_sum += features[i].img_size.width + features[i].img_size.height;
for (int i = 0; i < num_images; ++i)
focals[i] = focals_sum / num_images;
}
}
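// Self-calibration of a rotating camera: every normalized homography H
// satisfies H * W * H^T = W with W = K * K^T. The nested loops stack these
// constraints into a linear system over the six entries of the symmetric W,
// SVD::solveZ finds the null-space solution, and the upper-triangular K is
// recovered by Cholesky factorization of W.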
bool calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K)
{
int m = static_cast<int>(Hs.size());
CV_Assert(m >= 1);
std::vector<Mat> Hs_(m);
for (int i = 0; i < m; ++i)
{
CV_Assert(Hs[i].size() == Size(3, 3) && Hs[i].type() == CV_64F);
Hs_[i] = Hs[i] / std::pow(determinant(Hs[i]), 1./3.);
}
const int idx_map[3][3] = {{0, 1, 2}, {1, 3, 4}, {2, 4, 5}};
Mat_<double> A(6*m, 6);
A.setTo(0);
int eq_idx = 0;
for (int k = 0; k < m; ++k)
{
Mat_<double> H(Hs_[k]);
for (int i = 0; i < 3; ++i)
{
for (int j = i; j < 3; ++j, ++eq_idx)
{
for (int l = 0; l < 3; ++l)
{
for (int s = 0; s < 3; ++s)
{
int idx = idx_map[l][s];
A(eq_idx, idx) += H(i,l) * H(j,s);
}
}
A(eq_idx, idx_map[i][j]) -= 1;
}
}
}
Mat_<double> wcoef;
SVD::solveZ(A, wcoef);
Mat_<double> W(3,3);
for (int i = 0; i < 3; ++i)
for (int j = i; j < 3; ++j)
W(i,j) = W(j,i) = wcoef(idx_map[i][j], 0) / wcoef(5,0);
if (!decomposeCholesky(W.ptr<double>(), W.step, 3))
return false;
W(0,1) = W(0,2) = W(1,2) = 0;
K = W.t();
return true;
}
} // namespace detail
} // namespace cv

View File

@@ -0,0 +1,903 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels_stitching.hpp"
#ifdef HAVE_CUDA
namespace cv { namespace cuda { namespace device
{
namespace blend
{
void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight,
PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc);
void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight,
PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc);
void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src,
const int width, const int height);
void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src,
const int width, const int height);
}
}}}
#endif
namespace cv {
namespace detail {
static const float WEIGHT_EPS = 1e-5f;
Ptr<Blender> Blender::createDefault(int type, bool try_gpu)
{
if (type == NO)
return makePtr<Blender>();
if (type == FEATHER)
return makePtr<FeatherBlender>(try_gpu);
if (type == MULTI_BAND)
return makePtr<MultiBandBlender>(try_gpu);
CV_Error(Error::StsBadArg, "unsupported blending method");
}
void Blender::prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes)
{
prepare(resultRoi(corners, sizes));
}
void Blender::prepare(Rect dst_roi)
{
dst_.create(dst_roi.size(), CV_16SC3);
dst_.setTo(Scalar::all(0));
dst_mask_.create(dst_roi.size(), CV_8U);
dst_mask_.setTo(Scalar::all(0));
dst_roi_ = dst_roi;
}
void Blender::feed(InputArray _img, InputArray _mask, Point tl)
{
Mat img = _img.getMat();
Mat mask = _mask.getMat();
Mat dst = dst_.getMat(ACCESS_RW);
Mat dst_mask = dst_mask_.getMat(ACCESS_RW);
CV_Assert(img.type() == CV_16SC3);
CV_Assert(mask.type() == CV_8U);
int dx = tl.x - dst_roi_.x;
int dy = tl.y - dst_roi_.y;
for (int y = 0; y < img.rows; ++y)
{
const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);
Point3_<short> *dst_row = dst.ptr<Point3_<short> >(dy + y);
const uchar *mask_row = mask.ptr<uchar>(y);
uchar *dst_mask_row = dst_mask.ptr<uchar>(dy + y);
for (int x = 0; x < img.cols; ++x)
{
if (mask_row[x])
dst_row[dx + x] = src_row[x];
dst_mask_row[dx + x] |= mask_row[x];
}
}
}
void Blender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
UMat mask;
compare(dst_mask_, 0, mask, CMP_EQ);
dst_.setTo(Scalar::all(0), mask);
dst.assign(dst_);
dst_mask.assign(dst_mask_);
dst_.release();
dst_mask_.release();
}
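// Illustrative sketch (added, not part of the original file): the calling
// sequence shared by all blenders. The input vectors are assumptions for the
// example; fed images must be CV_16SC3 and masks CV_8U.
static void exampleBlend(const std::vector<UMat> &imgs,
                         const std::vector<UMat> &masks,
                         const std::vector<Point> &corners,
                         const std::vector<Size> &sizes)
{
    Ptr<Blender> blender = Blender::createDefault(Blender::MULTI_BAND, false);
    blender->prepare(corners, sizes);
    for (size_t i = 0; i < imgs.size(); ++i)
        blender->feed(imgs[i], masks[i], corners[i]);
    UMat pano, pano_mask;
    blender->blend(pano, pano_mask);
}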
void FeatherBlender::prepare(Rect dst_roi)
{
Blender::prepare(dst_roi);
dst_weight_map_.create(dst_roi.size(), CV_32F);
dst_weight_map_.setTo(0);
}
void FeatherBlender::feed(InputArray _img, InputArray mask, Point tl)
{
Mat img = _img.getMat();
Mat dst = dst_.getMat(ACCESS_RW);
CV_Assert(img.type() == CV_16SC3);
CV_Assert(mask.type() == CV_8U);
createWeightMap(mask, sharpness_, weight_map_);
Mat weight_map = weight_map_.getMat(ACCESS_READ);
Mat dst_weight_map = dst_weight_map_.getMat(ACCESS_RW);
int dx = tl.x - dst_roi_.x;
int dy = tl.y - dst_roi_.y;
for (int y = 0; y < img.rows; ++y)
{
const Point3_<short>* src_row = img.ptr<Point3_<short> >(y);
Point3_<short>* dst_row = dst.ptr<Point3_<short> >(dy + y);
const float* weight_row = weight_map.ptr<float>(y);
float* dst_weight_row = dst_weight_map.ptr<float>(dy + y);
for (int x = 0; x < img.cols; ++x)
{
dst_row[dx + x].x += static_cast<short>(src_row[x].x * weight_row[x]);
dst_row[dx + x].y += static_cast<short>(src_row[x].y * weight_row[x]);
dst_row[dx + x].z += static_cast<short>(src_row[x].z * weight_row[x]);
dst_weight_row[dx + x] += weight_row[x];
}
}
}
void FeatherBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
normalizeUsingWeightMap(dst_weight_map_, dst_);
compare(dst_weight_map_, WEIGHT_EPS, dst_mask_, CMP_GT);
Blender::blend(dst, dst_mask);
}
Rect FeatherBlender::createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
std::vector<UMat> &weight_maps)
{
weight_maps.resize(masks.size());
for (size_t i = 0; i < masks.size(); ++i)
createWeightMap(masks[i], sharpness_, weight_maps[i]);
Rect dst_roi = resultRoi(corners, masks);
Mat weights_sum(dst_roi.size(), CV_32F);
weights_sum.setTo(0);
for (size_t i = 0; i < weight_maps.size(); ++i)
{
Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
weight_maps[i].cols, weight_maps[i].rows);
add(weights_sum(roi), weight_maps[i], weights_sum(roi));
}
for (size_t i = 0; i < weight_maps.size(); ++i)
{
Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
weight_maps[i].cols, weight_maps[i].rows);
Mat tmp = weights_sum(roi);
tmp.setTo(1, tmp < std::numeric_limits<float>::epsilon());
divide(weight_maps[i], tmp, weight_maps[i]);
}
return dst_roi;
}
MultiBandBlender::MultiBandBlender(int try_gpu, int num_bands, int weight_type)
{
num_bands_ = 0;
setNumBands(num_bands);
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
can_use_gpu_ = try_gpu && cuda::getCudaEnabledDeviceCount();
gpu_feed_idx_ = 0;
#else
CV_UNUSED(try_gpu);
can_use_gpu_ = false;
#endif
CV_Assert(weight_type == CV_32F || weight_type == CV_16S);
weight_type_ = weight_type;
}
void MultiBandBlender::prepare(Rect dst_roi)
{
dst_roi_final_ = dst_roi;
    // Limit the number of bands to what the panorama size can support
double max_len = static_cast<double>(std::max(dst_roi.width, dst_roi.height));
num_bands_ = std::min(actual_num_bands_, static_cast<int>(ceil(std::log(max_len) / std::log(2.0))));
    // Add a border to the final image to ensure its sides are divisible by (1 << num_bands_)
dst_roi.width += ((1 << num_bands_) - dst_roi.width % (1 << num_bands_)) % (1 << num_bands_);
dst_roi.height += ((1 << num_bands_) - dst_roi.height % (1 << num_bands_)) % (1 << num_bands_);
Blender::prepare(dst_roi);
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
if (can_use_gpu_)
{
gpu_initialized_ = false;
gpu_feed_idx_ = 0;
gpu_tl_points_.clear();
gpu_weight_pyr_gauss_vec_.clear();
gpu_src_pyr_laplace_vec_.clear();
gpu_ups_.clear();
gpu_imgs_with_border_.clear();
gpu_dst_pyr_laplace_.resize(num_bands_ + 1);
gpu_dst_pyr_laplace_[0].create(dst_roi.size(), CV_16SC3);
gpu_dst_pyr_laplace_[0].setTo(Scalar::all(0));
gpu_dst_band_weights_.resize(num_bands_ + 1);
gpu_dst_band_weights_[0].create(dst_roi.size(), weight_type_);
gpu_dst_band_weights_[0].setTo(0);
for (int i = 1; i <= num_bands_; ++i)
{
gpu_dst_pyr_laplace_[i].create((gpu_dst_pyr_laplace_[i - 1].rows + 1) / 2,
(gpu_dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
gpu_dst_band_weights_[i].create((gpu_dst_band_weights_[i - 1].rows + 1) / 2,
(gpu_dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
gpu_dst_pyr_laplace_[i].setTo(Scalar::all(0));
gpu_dst_band_weights_[i].setTo(0);
}
}
else
#endif
{
dst_pyr_laplace_.resize(num_bands_ + 1);
dst_pyr_laplace_[0] = dst_;
dst_band_weights_.resize(num_bands_ + 1);
dst_band_weights_[0].create(dst_roi.size(), weight_type_);
dst_band_weights_[0].setTo(0);
for (int i = 1; i <= num_bands_; ++i)
{
dst_pyr_laplace_[i].create((dst_pyr_laplace_[i - 1].rows + 1) / 2,
(dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
dst_band_weights_[i].create((dst_band_weights_[i - 1].rows + 1) / 2,
(dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
dst_pyr_laplace_[i].setTo(Scalar::all(0));
dst_band_weights_[i].setTo(0);
}
}
}
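// Worked example (added commentary): for a 2000x1500 panorama with
// actual_num_bands_ == 5 (the usual default), max_len = 2000 and
// ceil(log2(2000)) = 11, so num_bands_ stays 5. Both sides are then padded to
// multiples of 1 << 5 = 32: 2000 % 32 == 16 adds 16 (-> 2016) and
// 1500 % 32 == 28 adds 4 (-> 1504).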
#ifdef HAVE_OPENCL
static bool ocl_MultiBandBlender_feed(InputArray _src, InputArray _weight,
InputOutputArray _dst, InputOutputArray _dst_weight)
{
String buildOptions = "-D DEFINE_feed";
ocl::buildOptionsAddMatrixDescription(buildOptions, "src", _src);
ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
ocl::buildOptionsAddMatrixDescription(buildOptions, "dst", _dst);
ocl::buildOptionsAddMatrixDescription(buildOptions, "dstWeight", _dst_weight);
ocl::Kernel k("feed", ocl::stitching::multibandblend_oclsrc, buildOptions);
if (k.empty())
return false;
UMat src = _src.getUMat();
k.args(ocl::KernelArg::ReadOnly(src),
ocl::KernelArg::ReadOnly(_weight.getUMat()),
ocl::KernelArg::ReadWrite(_dst.getUMat()),
ocl::KernelArg::ReadWrite(_dst_weight.getUMat())
);
size_t globalsize[2] = {(size_t)src.cols, (size_t)src.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
{
#if ENABLE_LOG
int64 t = getTickCount();
#endif
UMat img;
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
    // If using the GPU, save the top-left coordinate on the first run after prepare()
if (can_use_gpu_)
{
if (!gpu_initialized_)
{
gpu_tl_points_.push_back(tl);
}
else
{
tl = gpu_tl_points_[gpu_feed_idx_];
}
}
    // If _img is not a GpuMat, get it as a UMat from the InputArray object.
    // If it is a GpuMat, make a dummy UMat with the right dimensions but no
    // data, and fetch _img as a GpuMat.
if (!_img.isGpuMat())
#endif
{
img = _img.getUMat();
}
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
else
{
gpu_img_ = _img.getGpuMat();
img = UMat(gpu_img_.rows, gpu_img_.cols, gpu_img_.type());
}
#endif
CV_Assert(img.type() == CV_16SC3 || img.type() == CV_8UC3);
CV_Assert(mask.type() == CV_8U);
// Keep source image in memory with small border
int gap = 3 * (1 << num_bands_);
Point tl_new(std::max(dst_roi_.x, tl.x - gap),
std::max(dst_roi_.y, tl.y - gap));
Point br_new(std::min(dst_roi_.br().x, tl.x + img.cols + gap),
std::min(dst_roi_.br().y, tl.y + img.rows + gap));
    // Ensure the coordinates of the top-left and bottom-right corners are divisible by (1 << num_bands_),
    // so that the scale between pyramid layers is exactly 2.
    //
    // We do this to avoid interpolation problems when keeping sub-images only. There is no such problem
    // when each image is bordered to the final image size, but that approach is too memory-hungry.
tl_new.x = dst_roi_.x + (((tl_new.x - dst_roi_.x) >> num_bands_) << num_bands_);
tl_new.y = dst_roi_.y + (((tl_new.y - dst_roi_.y) >> num_bands_) << num_bands_);
int width = br_new.x - tl_new.x;
int height = br_new.y - tl_new.y;
width += ((1 << num_bands_) - width % (1 << num_bands_)) % (1 << num_bands_);
height += ((1 << num_bands_) - height % (1 << num_bands_)) % (1 << num_bands_);
br_new.x = tl_new.x + width;
br_new.y = tl_new.y + height;
int dy = std::max(br_new.y - dst_roi_.br().y, 0);
int dx = std::max(br_new.x - dst_roi_.br().x, 0);
tl_new.x -= dx; br_new.x -= dx;
tl_new.y -= dy; br_new.y -= dy;
int top = tl.y - tl_new.y;
int left = tl.x - tl_new.x;
int bottom = br_new.y - tl.y - img.rows;
int right = br_new.x - tl.x - img.cols;
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
if (can_use_gpu_)
{
if (!gpu_initialized_)
{
gpu_imgs_with_border_.push_back(cuda::GpuMat());
gpu_weight_pyr_gauss_vec_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
gpu_src_pyr_laplace_vec_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
gpu_ups_.push_back(std::vector<cuda::GpuMat>(num_bands_));
}
        // If _img is not a GpuMat, upload it to the GPU; otherwise gpu_img_ was set already
if (!_img.isGpuMat())
{
gpu_img_.upload(img);
}
// Create the source image Laplacian pyramid
cuda::copyMakeBorder(gpu_img_, gpu_imgs_with_border_[gpu_feed_idx_], top, bottom,
left, right, BORDER_REFLECT);
gpu_imgs_with_border_[gpu_feed_idx_].convertTo(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][0], CV_16S);
for (int i = 0; i < num_bands_; ++i)
cuda::pyrDown(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i],
gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i + 1]);
for (int i = 0; i < num_bands_; ++i)
{
cuda::pyrUp(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i + 1], gpu_ups_[gpu_feed_idx_][i]);
cuda::subtract(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i],
gpu_ups_[gpu_feed_idx_][i],
gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i]);
}
// Create the weight map Gaussian pyramid only if not yet initialized
if (!gpu_initialized_)
{
if (mask.isGpuMat())
{
gpu_mask_ = mask.getGpuMat();
}
else
{
gpu_mask_.upload(mask);
}
if (weight_type_ == CV_32F)
{
gpu_mask_.convertTo(gpu_weight_map_, CV_32F, 1. / 255.);
}
else // weight_type_ == CV_16S
{
gpu_mask_.convertTo(gpu_weight_map_, CV_16S);
cuda::compare(gpu_mask_, 0, gpu_add_mask_, CMP_NE);
cuda::add(gpu_weight_map_, Scalar::all(1), gpu_weight_map_, gpu_add_mask_);
}
cuda::copyMakeBorder(gpu_weight_map_, gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][0], top,
bottom, left, right, BORDER_CONSTANT);
for (int i = 0; i < num_bands_; ++i)
cuda::pyrDown(gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i],
gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i + 1]);
}
int y_tl = tl_new.y - dst_roi_.y;
int y_br = br_new.y - dst_roi_.y;
int x_tl = tl_new.x - dst_roi_.x;
int x_br = br_new.x - dst_roi_.x;
// Add weighted layer of the source image to the final Laplacian pyramid layer
for (int i = 0; i <= num_bands_; ++i)
{
Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
cuda::GpuMat &_src_pyr_laplace = gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i];
cuda::GpuMat _dst_pyr_laplace = gpu_dst_pyr_laplace_[i](rc);
cuda::GpuMat &_weight_pyr_gauss = gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i];
cuda::GpuMat _dst_band_weights = gpu_dst_band_weights_[i](rc);
using namespace cv::cuda::device::blend;
if (weight_type_ == CV_32F)
{
addSrcWeightGpu32F(_src_pyr_laplace, _weight_pyr_gauss, _dst_pyr_laplace, _dst_band_weights, rc);
}
else
{
addSrcWeightGpu16S(_src_pyr_laplace, _weight_pyr_gauss, _dst_pyr_laplace, _dst_band_weights, rc);
}
x_tl /= 2; y_tl /= 2;
x_br /= 2; y_br /= 2;
}
++gpu_feed_idx_;
return;
}
#endif
// Create the source image Laplacian pyramid
UMat img_with_border;
copyMakeBorder(_img, img_with_border, top, bottom, left, right,
BORDER_REFLECT);
LOGLN(" Add border to the source image, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
t = getTickCount();
#endif
std::vector<UMat> src_pyr_laplace;
createLaplacePyr(img_with_border, num_bands_, src_pyr_laplace);
LOGLN(" Create the source image Laplacian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
t = getTickCount();
#endif
// Create the weight map Gaussian pyramid
UMat weight_map;
std::vector<UMat> weight_pyr_gauss(num_bands_ + 1);
if (weight_type_ == CV_32F)
{
mask.getUMat().convertTo(weight_map, CV_32F, 1./255.);
}
else // weight_type_ == CV_16S
{
mask.getUMat().convertTo(weight_map, CV_16S);
UMat add_mask;
compare(mask, 0, add_mask, CMP_NE);
add(weight_map, Scalar::all(1), weight_map, add_mask);
}
copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, BORDER_CONSTANT);
for (int i = 0; i < num_bands_; ++i)
pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);
LOGLN(" Create the weight map Gaussian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#if ENABLE_LOG
t = getTickCount();
#endif
int y_tl = tl_new.y - dst_roi_.y;
int y_br = br_new.y - dst_roi_.y;
int x_tl = tl_new.x - dst_roi_.x;
int x_br = br_new.x - dst_roi_.x;
// Add weighted layer of the source image to the final Laplacian pyramid layer
for (int i = 0; i <= num_bands_; ++i)
{
Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
#ifdef HAVE_OPENCL
if ( !cv::ocl::isOpenCLActivated() ||
!ocl_MultiBandBlender_feed(src_pyr_laplace[i], weight_pyr_gauss[i],
dst_pyr_laplace_[i](rc), dst_band_weights_[i](rc)) )
#endif
{
Mat _src_pyr_laplace = src_pyr_laplace[i].getMat(ACCESS_READ);
Mat _dst_pyr_laplace = dst_pyr_laplace_[i](rc).getMat(ACCESS_RW);
Mat _weight_pyr_gauss = weight_pyr_gauss[i].getMat(ACCESS_READ);
Mat _dst_band_weights = dst_band_weights_[i](rc).getMat(ACCESS_RW);
if (weight_type_ == CV_32F)
{
for (int y = 0; y < rc.height; ++y)
{
const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
const float* weight_row = _weight_pyr_gauss.ptr<float>(y);
float* dst_weight_row = _dst_band_weights.ptr<float>(y);
for (int x = 0; x < rc.width; ++x)
{
dst_row[x].x += static_cast<short>(src_row[x].x * weight_row[x]);
dst_row[x].y += static_cast<short>(src_row[x].y * weight_row[x]);
dst_row[x].z += static_cast<short>(src_row[x].z * weight_row[x]);
dst_weight_row[x] += weight_row[x];
}
}
}
else // weight_type_ == CV_16S
{
for (int y = 0; y < y_br - y_tl; ++y)
{
const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
const short* weight_row = _weight_pyr_gauss.ptr<short>(y);
short* dst_weight_row = _dst_band_weights.ptr<short>(y);
for (int x = 0; x < x_br - x_tl; ++x)
{
dst_row[x].x += short((src_row[x].x * weight_row[x]) >> 8);
dst_row[x].y += short((src_row[x].y * weight_row[x]) >> 8);
dst_row[x].z += short((src_row[x].z * weight_row[x]) >> 8);
dst_weight_row[x] += weight_row[x];
}
}
}
}
#ifdef HAVE_OPENCL
else
{
CV_IMPL_ADD(CV_IMPL_OCL);
}
#endif
x_tl /= 2; y_tl /= 2;
x_br /= 2; y_br /= 2;
}
LOGLN(" Add weighted layer of the source image to the final Laplacian pyramid layer, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
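// Fixed-point note (added commentary): with weight_type_ == CV_16S the mask
// values {0, 255} become {0, 256} after the add() above, so 256 stands for a
// weight of 1.0 and (v * w) >> 8 approximates v * w / 256. For example, a
// pixel value of 100 under full weight accumulates (100 * 256) >> 8 == 100.
// normalizeUsingWeightMap() later undoes the scaling with (v << 8) / (w + 1).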
void MultiBandBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
{
Rect dst_rc(0, 0, dst_roi_final_.width, dst_roi_final_.height);
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
if (can_use_gpu_)
{
if (!gpu_initialized_)
{
gpu_ups_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
}
for (int i = 0; i <= num_bands_; ++i)
{
cuda::GpuMat dst_i = gpu_dst_pyr_laplace_[i];
cuda::GpuMat weight_i = gpu_dst_band_weights_[i];
using namespace ::cv::cuda::device::blend;
if (weight_type_ == CV_32F)
{
normalizeUsingWeightMapGpu32F(weight_i, dst_i, weight_i.cols, weight_i.rows);
}
else
{
normalizeUsingWeightMapGpu16S(weight_i, dst_i, weight_i.cols, weight_i.rows);
}
}
// Restore image from Laplacian pyramid
for (size_t i = num_bands_; i > 0; --i)
{
cuda::pyrUp(gpu_dst_pyr_laplace_[i], gpu_ups_[gpu_ups_.size()-1][num_bands_-i]);
cuda::add(gpu_ups_[gpu_ups_.size()-1][num_bands_-i],
gpu_dst_pyr_laplace_[i - 1],
gpu_dst_pyr_laplace_[i - 1]);
}
        // If dst is a GpuMat, do the masking on the GPU and return dst as a GpuMat;
        // otherwise download the image to the CPU and return it as an ordinary Mat.
if (dst.isGpuMat())
{
cuda::GpuMat &gpu_dst = dst.getGpuMatRef();
cuda::compare(gpu_dst_band_weights_[0](dst_rc), WEIGHT_EPS, gpu_dst_mask_, CMP_GT);
cuda::compare(gpu_dst_mask_, 0, gpu_mask_, CMP_EQ);
gpu_dst_pyr_laplace_[0](dst_rc).setTo(Scalar::all(0), gpu_mask_);
gpu_dst_pyr_laplace_[0](dst_rc).convertTo(gpu_dst, CV_16S);
}
else
{
gpu_dst_pyr_laplace_[0](dst_rc).download(dst_);
Mat dst_band_weights_0;
gpu_dst_band_weights_[0].download(dst_band_weights_0);
compare(dst_band_weights_0(dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);
Blender::blend(dst, dst_mask);
}
        // Set destination Mats to 0 so a new image can be blended
for (size_t i = 0; i < (size_t)(num_bands_ + 1); ++i)
{
gpu_dst_band_weights_[i].setTo(0);
gpu_dst_pyr_laplace_[i].setTo(Scalar::all(0));
}
gpu_feed_idx_ = 0;
gpu_initialized_ = true;
}
else
#endif
{
cv::UMat dst_band_weights_0;
for (int i = 0; i <= num_bands_; ++i)
normalizeUsingWeightMap(dst_band_weights_[i], dst_pyr_laplace_[i]);
restoreImageFromLaplacePyr(dst_pyr_laplace_);
dst_ = dst_pyr_laplace_[0](dst_rc);
dst_band_weights_0 = dst_band_weights_[0];
dst_pyr_laplace_.clear();
dst_band_weights_.clear();
compare(dst_band_weights_0(dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);
Blender::blend(dst, dst_mask);
}
}
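// Reuse note (added commentary): on the CUDA path blend() zeroes the
// destination pyramids and sets gpu_initialized_, so later feed() calls reuse
// the cached top-left points and weight pyramids. This makes repeated blending
// with fixed geometry (e.g. stitching video frames) noticeably cheaper.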
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
#ifdef HAVE_OPENCL
static bool ocl_normalizeUsingWeightMap(InputArray _weight, InputOutputArray _mat)
{
String buildOptions = "-D DEFINE_normalizeUsingWeightMap";
ocl::buildOptionsAddMatrixDescription(buildOptions, "mat", _mat);
ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
ocl::Kernel k("normalizeUsingWeightMap", ocl::stitching::multibandblend_oclsrc, buildOptions);
if (k.empty())
return false;
UMat mat = _mat.getUMat();
k.args(ocl::KernelArg::ReadWrite(mat),
ocl::KernelArg::ReadOnly(_weight.getUMat())
);
size_t globalsize[2] = {(size_t)mat.cols, (size_t)mat.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
void normalizeUsingWeightMap(InputArray _weight, InputOutputArray _src)
{
Mat src;
Mat weight;
#ifdef HAVE_OPENCL
if ( !cv::ocl::isOpenCLActivated() ||
!ocl_normalizeUsingWeightMap(_weight, _src) )
#endif
{
src = _src.getMat();
weight = _weight.getMat();
CV_Assert(src.type() == CV_16SC3);
if (weight.type() == CV_32FC1)
{
for (int y = 0; y < src.rows; ++y)
{
Point3_<short> *row = src.ptr<Point3_<short> >(y);
const float *weight_row = weight.ptr<float>(y);
for (int x = 0; x < src.cols; ++x)
{
row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));
row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));
row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));
}
}
}
else
{
CV_Assert(weight.type() == CV_16SC1);
for (int y = 0; y < src.rows; ++y)
{
const short *weight_row = weight.ptr<short>(y);
Point3_<short> *row = src.ptr<Point3_<short> >(y);
for (int x = 0; x < src.cols; ++x)
{
int w = weight_row[x] + 1;
row[x].x = static_cast<short>((row[x].x << 8) / w);
row[x].y = static_cast<short>((row[x].y << 8) / w);
row[x].z = static_cast<short>((row[x].z << 8) / w);
}
}
}
}
#ifdef HAVE_OPENCL
else
{
CV_IMPL_ADD(CV_IMPL_OCL);
}
#endif
}
void createWeightMap(InputArray mask, float sharpness, InputOutputArray weight)
{
CV_Assert(mask.type() == CV_8U);
distanceTransform(mask, weight, DIST_L1, 3);
UMat tmp;
multiply(weight, sharpness, tmp);
threshold(tmp, weight, 1.f, 1.f, THRESH_TRUNC);
}
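// Usage sketch (added commentary): the feather weight is the L1 distance to
// the mask border, scaled by 'sharpness' and truncated at 1, so a value such
// as 0.02f yields a linear ramp roughly 1 / 0.02 = 50 pixels wide:
//
//     UMat w;
//     createWeightMap(mask, 0.02f, w);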
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
pyr.resize(num_levels + 1);
if(img.depth() == CV_8U)
{
if(num_levels == 0)
{
img.getUMat().convertTo(pyr[0], CV_16S);
return;
}
UMat downNext;
UMat current = img.getUMat();
pyrDown(img, downNext);
for(int i = 1; i < num_levels; ++i)
{
UMat lvl_up;
UMat lvl_down;
pyrDown(downNext, lvl_down);
pyrUp(downNext, lvl_up, current.size());
subtract(current, lvl_up, pyr[i-1], noArray(), CV_16S);
current = downNext;
downNext = lvl_down;
}
{
UMat lvl_up;
pyrUp(downNext, lvl_up, current.size());
subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);
downNext.convertTo(pyr[num_levels], CV_16S);
}
}
else
{
pyr[0] = img.getUMat();
for (int i = 0; i < num_levels; ++i)
pyrDown(pyr[i], pyr[i + 1]);
UMat tmp;
for (int i = 0; i < num_levels; ++i)
{
pyrUp(pyr[i + 1], tmp, pyr[i].size());
subtract(pyr[i], tmp, pyr[i]);
}
}
}
void createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
pyr.resize(num_levels + 1);
std::vector<cuda::GpuMat> gpu_pyr(num_levels + 1);
gpu_pyr[0].upload(img);
for (int i = 0; i < num_levels; ++i)
cuda::pyrDown(gpu_pyr[i], gpu_pyr[i + 1]);
cuda::GpuMat tmp;
for (int i = 0; i < num_levels; ++i)
{
cuda::pyrUp(gpu_pyr[i + 1], tmp);
cuda::subtract(gpu_pyr[i], tmp, gpu_pyr[i]);
gpu_pyr[i].download(pyr[i]);
}
gpu_pyr[num_levels].download(pyr[num_levels]);
#else
CV_UNUSED(img);
CV_UNUSED(num_levels);
CV_UNUSED(pyr);
CV_Error(Error::StsNotImplemented, "CUDA optimization is unavailable");
#endif
}
void restoreImageFromLaplacePyr(std::vector<UMat> &pyr)
{
if (pyr.empty())
return;
UMat tmp;
for (size_t i = pyr.size() - 1; i > 0; --i)
{
pyrUp(pyr[i], tmp, pyr[i - 1].size());
add(tmp, pyr[i - 1], pyr[i - 1]);
}
}
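// Round-trip sketch (added, not part of the original file): building a
// Laplacian pyramid and collapsing it reconstructs the input up to rounding.
static void exampleLaplaceRoundTrip(InputArray img) // e.g. CV_8UC3
{
    std::vector<UMat> pyr;
    createLaplacePyr(img, 5, pyr);
    restoreImageFromLaplacePyr(pyr);
    // pyr[0] now approximates the input (converted to CV_16S for 8U inputs).
}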
void restoreImageFromLaplacePyrGpu(std::vector<UMat> &pyr)
{
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
if (pyr.empty())
return;
std::vector<cuda::GpuMat> gpu_pyr(pyr.size());
for (size_t i = 0; i < pyr.size(); ++i)
gpu_pyr[i].upload(pyr[i]);
cuda::GpuMat tmp;
for (size_t i = pyr.size() - 1; i > 0; --i)
{
cuda::pyrUp(gpu_pyr[i], tmp);
cuda::add(tmp, gpu_pyr[i - 1], gpu_pyr[i - 1]);
}
gpu_pyr[0].download(pyr[0]);
#else
CV_UNUSED(pyr);
CV_Error(Error::StsNotImplemented, "CUDA optimization is unavailable");
#endif
}
} // namespace detail
} // namespace cv

View File

@@ -0,0 +1,73 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv {
namespace detail {
CameraParams::CameraParams() : focal(1), aspect(1), ppx(0), ppy(0),
R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
CameraParams::CameraParams(const CameraParams &other) { *this = other; }
CameraParams& CameraParams::operator =(const CameraParams &other)
{
focal = other.focal;
ppx = other.ppx;
ppy = other.ppy;
aspect = other.aspect;
R = other.R.clone();
t = other.t.clone();
return *this;
}
Mat CameraParams::K() const
{
Mat_<double> k = Mat::eye(3, 3, CV_64F);
k(0,0) = focal; k(0,2) = ppx;
k(1,1) = focal * aspect; k(1,2) = ppy;
return std::move(k);
}
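// Layout note (added commentary): K() assembles the pinhole intrinsic matrix
//
//         [ focal        0        ppx ]
//     K = [   0    focal * aspect ppy ]
//         [   0          0         1  ]
//
// with one focal length shared by both axes up to the aspect factor.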
} // namespace detail
} // namespace cv

View File

@@ -0,0 +1,221 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
// TODO use intrinsics like __sinf and so on
namespace build_warp_maps
{
__constant__ float ck_rinv[9];
__constant__ float cr_kinv[9];
__constant__ float ct[3];
__constant__ float cscale;
}
class PlaneMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
float x_ = u / cscale - ct[0];
float y_ = v / cscale - ct[1];
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]);
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]);
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]);
x /= z;
y /= z;
}
};
class CylindricalMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
u /= cscale;
float x_ = ::sinf(u);
float y_ = v / cscale;
float z_ = ::cosf(u);
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
};
class SphericalMapper
{
public:
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
{
using namespace build_warp_maps;
v /= cscale;
u /= cscale;
float sinv = ::sinf(v);
float x_ = sinv * ::sinf(u);
float y_ = -::cosf(v);
float z_ = sinv * ::cosf(u);
float z;
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
if (z > 0) { x /= z; y /= z; }
else x = y = -1;
}
};
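        // Mapping note (added commentary): each mapper computes the backward
        // map from a warped pixel (u, v) to a source pixel (x, y): the warped
        // coordinates are unprojected onto the warp surface (plane, cylinder
        // or unit sphere), the resulting point is taken through ck_rinv (the
        // k_rinv array uploaded by the host, conventionally K * R^-1), and
        // projected by dividing by z. For the cylindrical and spherical
        // mappers a non-positive z means the point lies behind the camera, so
        // (-1, -1) marks the pixel invalid for the subsequent remap().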
template <typename Mapper>
__global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows,
PtrStepf map_x, PtrStepf map_y)
{
int du = blockIdx.x * blockDim.x + threadIdx.x;
int dv = blockIdx.y * blockDim.y + threadIdx.y;
if (du < cols && dv < rows)
{
float u = tl_u + du;
float v = tl_v + dv;
float x, y;
Mapper::mapBackward(u, v, x, y);
map_x.ptr(dv)[du] = x;
map_y.ptr(dv)[du] = y;
}
}
void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], const float t[3],
float scale, cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
buildWarpMapsKernel<PlaneMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
buildWarpMapsKernel<CylindricalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream)
{
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
int cols = map_x.cols;
int rows = map_x.rows;
dim3 threads(32, 8);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
buildWarpMapsKernel<SphericalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
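        // Launch-geometry note (added commentary): each map is covered by
        // 32x8 thread blocks, so a 1000x600 map launches divUp(1000, 32) = 32
        // by divUp(600, 8) = 75 blocks; threads outside the map exit early at
        // the kernel's bounds check.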
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace device {
#endif /* CUDA_DISABLER */

View File

@@ -0,0 +1,112 @@
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/types.hpp"
namespace cv { namespace cuda { namespace device
{
namespace blend
{
__global__ void addSrcWeightKernel16S(const PtrStep<short> src, const PtrStep<short> src_weight,
PtrStep<short> dst, PtrStep<short> dst_weight, int rows, int cols)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
const short3 v = ((const short3*)src.ptr(y))[x];
short w = src_weight.ptr(y)[x];
((short3*)dst.ptr(y))[x].x += short((v.x * w) >> 8);
((short3*)dst.ptr(y))[x].y += short((v.y * w) >> 8);
((short3*)dst.ptr(y))[x].z += short((v.z * w) >> 8);
dst_weight.ptr(y)[x] += w;
}
}
void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight,
PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc)
{
dim3 threads(16, 16);
dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y));
addSrcWeightKernel16S<<<grid, threads>>>(src, src_weight, dst, dst_weight, rc.height, rc.width);
cudaSafeCall(cudaGetLastError());
}
__global__ void addSrcWeightKernel32F(const PtrStep<short> src, const PtrStepf src_weight,
PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
const short3 v = ((const short3*)src.ptr(y))[x];
float w = src_weight.ptr(y)[x];
((short3*)dst.ptr(y))[x].x += static_cast<short>(v.x * w);
((short3*)dst.ptr(y))[x].y += static_cast<short>(v.y * w);
((short3*)dst.ptr(y))[x].z += static_cast<short>(v.z * w);
dst_weight.ptr(y)[x] += w;
}
}
void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight,
PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc)
{
dim3 threads(16, 16);
dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y));
addSrcWeightKernel32F<<<grid, threads>>>(src, src_weight, dst, dst_weight, rc.height, rc.width);
cudaSafeCall(cudaGetLastError());
}
__global__ void normalizeUsingWeightKernel16S(const PtrStep<short> weight, PtrStep<short> src,
const int width, const int height)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < width && y < height)
{
const short3 v = ((short3*)src.ptr(y))[x];
short w = weight.ptr(y)[x];
((short3*)src.ptr(y))[x] = make_short3(short((v.x << 8) / w),
short((v.y << 8) / w), short((v.z << 8) / w));
}
}
void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src,
const int width, const int height)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x), divUp(height, threads.y));
normalizeUsingWeightKernel16S<<<grid, threads>>> (weight, src, width, height);
}
__global__ void normalizeUsingWeightKernel32F(const PtrStepf weight, PtrStep<short> src,
const int width, const int height)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < width && y < height)
{
const float WEIGHT_EPS = 1e-5f;
const short3 v = ((short3*)src.ptr(y))[x];
float w = weight.ptr(y)[x];
((short3*)src.ptr(y))[x] = make_short3(static_cast<short>(v.x / (w + WEIGHT_EPS)),
static_cast<short>(v.y / (w + WEIGHT_EPS)),
static_cast<short>(v.z / (w + WEIGHT_EPS)));
}
}
void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src,
const int width, const int height)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x), divUp(height, threads.y));
normalizeUsingWeightKernel32F<<<grid, threads>>> (weight, src, width, height);
}
}
}}}
#endif

View File

@@ -0,0 +1,619 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_EIGEN
#include <Eigen/Core>
#include <Eigen/Dense>
#endif
namespace cv {
namespace detail {
Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
{
Ptr<ExposureCompensator> e;
if (type == NO)
e = makePtr<NoExposureCompensator>();
else if (type == GAIN)
e = makePtr<GainCompensator>();
else if (type == GAIN_BLOCKS)
e = makePtr<BlocksGainCompensator>();
else if (type == CHANNELS)
e = makePtr<ChannelsCompensator>();
else if (type == CHANNELS_BLOCKS)
e = makePtr<BlocksChannelsCompensator>();
if (e.get() != nullptr)
return e;
CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
}
void ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks)
{
std::vector<std::pair<UMat,uchar> > level_masks;
for (size_t i = 0; i < masks.size(); ++i)
level_masks.push_back(std::make_pair(masks[i], (uchar)255));
feed(corners, images, level_masks);
}
void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
LOGLN("Exposure compensation...");
#if ENABLE_LOG
int64 t = getTickCount();
#endif
const int num_images = static_cast<int>(images.size());
Mat accumulated_gains;
prepareSimilarityMask(corners, images);
for (int n = 0; n < nr_feeds_; ++n)
{
if (n > 0)
{
// Apply previous iteration gains
for (int i = 0; i < num_images; ++i)
apply(i, corners[i], images[i], masks[i].first);
}
singleFeed(corners, images, masks);
if (n == 0)
accumulated_gains = gains_.clone();
else
multiply(accumulated_gains, gains_, accumulated_gains);
}
gains_ = accumulated_gains;
LOGLN("Exposure compensation, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
void GainCompensator::singleFeed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
CV_Assert(corners.size() == images.size() && images.size() == masks.size());
if (images.size() == 0)
return;
const int num_channels = images[0].channels();
CV_Assert(std::all_of(images.begin(), images.end(),
[num_channels](const UMat& image) { return image.channels() == num_channels; }));
CV_Assert(num_channels == 1 || num_channels == 3);
const int num_images = static_cast<int>(images.size());
Mat_<int> N(num_images, num_images); N.setTo(0);
Mat_<double> I(num_images, num_images); I.setTo(0);
Mat_<bool> skip(num_images, 1); skip.setTo(true);
Mat subimg1, subimg2;
Mat_<uchar> submask1, submask2, intersect;
std::vector<UMat>::iterator similarity_it = similarities_.begin();
for (int i = 0; i < num_images; ++i)
{
for (int j = i; j < num_images; ++j)
{
Rect roi;
if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
{
subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
submask1 = masks[i].first(Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
intersect = (submask1 == masks[i].second) & (submask2 == masks[j].second);
if (!similarities_.empty())
{
CV_Assert(similarity_it != similarities_.end());
UMat similarity = *similarity_it++;
                    // An in-place operation has an issue here; don't remove the swap.
                    // Details: https://github.com/opencv/opencv/issues/19184
Mat_<uchar> intersect_updated;
bitwise_and(intersect, similarity, intersect_updated);
std::swap(intersect, intersect_updated);
}
int intersect_count = countNonZero(intersect);
N(i, j) = N(j, i) = std::max(1, intersect_count);
// Don't compute Isums if subimages do not intersect anyway
if (intersect_count == 0)
continue;
// Don't skip images that intersect with at least one other image
if (i != j)
{
skip(i, 0) = false;
skip(j, 0) = false;
}
double Isum1 = 0, Isum2 = 0;
for (int y = 0; y < roi.height; ++y)
{
if (num_channels == 3)
{
const Vec<uchar, 3>* r1 = subimg1.ptr<Vec<uchar, 3> >(y);
const Vec<uchar, 3>* r2 = subimg2.ptr<Vec<uchar, 3> >(y);
for (int x = 0; x < roi.width; ++x)
{
if (intersect(y, x))
{
Isum1 += norm(r1[x]);
Isum2 += norm(r2[x]);
}
}
}
else // if (num_channels == 1)
{
const uchar* r1 = subimg1.ptr<uchar>(y);
const uchar* r2 = subimg2.ptr<uchar>(y);
for (int x = 0; x < roi.width; ++x)
{
if (intersect(y, x))
{
Isum1 += r1[x];
Isum2 += r2[x];
}
}
}
}
I(i, j) = Isum1 / N(i, j);
I(j, i) = Isum2 / N(i, j);
}
}
}
if (getUpdateGain() || gains_.rows != num_images)
{
double alpha = 0.01;
double beta = 100;
int num_eq = num_images - countNonZero(skip);
gains_.create(num_images, 1);
gains_.setTo(1);
        // No images to process; all gains stay at one, so stop here
if (num_eq == 0)
return;
Mat_<double> A(num_eq, num_eq); A.setTo(0);
Mat_<double> b(num_eq, 1); b.setTo(0);
for (int i = 0, ki = 0; i < num_images; ++i)
{
if (skip(i, 0))
continue;
for (int j = 0, kj = 0; j < num_images; ++j)
{
if (skip(j, 0))
continue;
b(ki, 0) += beta * N(i, j);
A(ki, ki) += beta * N(i, j);
if (j != i)
{
A(ki, ki) += 2 * alpha * I(i, j) * I(i, j) * N(i, j);
A(ki, kj) -= 2 * alpha * I(i, j) * I(j, i) * N(i, j);
}
++kj;
}
++ki;
}
Mat_<double> l_gains;
#ifdef HAVE_EIGEN
Eigen::MatrixXf eigen_A, eigen_b, eigen_x;
cv2eigen(A, eigen_A);
cv2eigen(b, eigen_b);
Eigen::LLT<Eigen::MatrixXf> solver(eigen_A);
#if ENABLE_LOG
if (solver.info() != Eigen::ComputationInfo::Success)
LOGLN("Failed to solve exposure compensation system");
#endif
eigen_x = solver.solve(eigen_b);
Mat_<float> l_gains_float;
eigen2cv(eigen_x, l_gains_float);
l_gains_float.convertTo(l_gains, CV_64FC1);
#else
solve(A, b, l_gains);
#endif
CV_CheckTypeEQ(l_gains.type(), CV_64FC1, "");
for (int i = 0, j = 0; i < num_images; ++i)
{
// Only assign non-skipped gains. Other gains are already set to 1
if (!skip(i, 0))
gains_.at<double>(i, 0) = l_gains(j++, 0);
}
}
}
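// Model note (added commentary, hedged): up to constant factors the system
// above is the normal-equation form of the gain cost used in Brown & Lowe,
// "Automatic Panoramic Image Stitching" (IJCV 2007):
//
//     e = sum_ij N_ij * ( alpha * (g_i * I_ij - g_j * I_ji)^2
//                       + beta  * (1 - g_i)^2 )
//
// where I_ij is the mean intensity of image i over its overlap with image j,
// N_ij is the overlap size, alpha (0.01) rewards matching mean intensities and
// beta (100) keeps gains near one. Setting de/dg_i = 0 yields A * g = b.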
void GainCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
{
CV_INSTRUMENT_REGION();
multiply(image, gains_(index, 0), image);
}
std::vector<double> GainCompensator::gains() const
{
std::vector<double> gains_vec(gains_.rows);
for (int i = 0; i < gains_.rows; ++i)
gains_vec[i] = gains_(i, 0);
return gains_vec;
}
void GainCompensator::getMatGains(std::vector<Mat>& umv)
{
umv.clear();
for (int i = 0; i < gains_.rows; ++i)
umv.push_back(Mat(1,1,CV_64FC1,Scalar(gains_(i, 0))));
}
void GainCompensator::setMatGains(std::vector<Mat>& umv)
{
gains_=Mat_<double>(static_cast<int>(umv.size()),1);
for (int i = 0; i < static_cast<int>(umv.size()); i++)
{
int type = umv[i].type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
CV_CheckType(type, depth == CV_64F && cn == 1, "Only double images are supported for gain");
CV_Assert(umv[i].rows == 1 && umv[i].cols == 1);
gains_(i, 0) = umv[i].at<double>(0, 0);
}
}
void GainCompensator::prepareSimilarityMask(
const std::vector<Point> &corners, const std::vector<UMat> &images)
{
if (similarity_threshold_ >= 1)
{
LOGLN(" skipping similarity mask: disabled");
return;
}
if (!similarities_.empty())
{
LOGLN(" skipping similarity mask: already set");
return;
}
LOGLN(" calculating similarity mask");
const int num_images = static_cast<int>(images.size());
for (int i = 0; i < num_images; ++i)
{
for (int j = i; j < num_images; ++j)
{
Rect roi;
if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
{
UMat subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i]));
UMat subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j]));
UMat similarity = buildSimilarityMask(subimg1, subimg2);
similarities_.push_back(similarity);
}
}
}
}
UMat GainCompensator::buildSimilarityMask(InputArray src_array1, InputArray src_array2)
{
CV_Assert(src_array1.rows() == src_array2.rows() && src_array1.cols() == src_array2.cols());
CV_Assert(src_array1.type() == src_array2.type());
CV_Assert(src_array1.type() == CV_8UC3 || src_array1.type() == CV_8UC1);
Mat src1 = src_array1.getMat();
Mat src2 = src_array2.getMat();
UMat umat_similarity(src1.rows, src1.cols, CV_8UC1);
Mat similarity = umat_similarity.getMat(ACCESS_WRITE);
if (src1.channels() == 3)
{
for (int y = 0; y < similarity.rows; ++y)
{
for (int x = 0; x < similarity.cols; ++x)
{
Vec<float, 3> vec_diff =
Vec<float, 3>(*src1.ptr<Vec<uchar, 3>>(y, x))
- Vec<float, 3>(*src2.ptr<Vec<uchar, 3>>(y, x));
double diff = norm(vec_diff * (1.f / 255.f));
*similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
}
}
}
else // if (src1.channels() == 1)
{
for (int y = 0; y < similarity.rows; ++y)
{
for (int x = 0; x < similarity.cols; ++x)
{
float diff = std::abs(static_cast<int>(*src1.ptr<uchar>(y, x))
- static_cast<int>(*src2.ptr<uchar>(y, x))) / 255.f;
*similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
}
}
}
similarity.release();
Mat kernel = getStructuringElement(MORPH_RECT, Size(3,3));
UMat umat_erode;
erode(umat_similarity, umat_erode, kernel);
dilate(umat_erode, umat_similarity, kernel);
return umat_similarity;
}
void ChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
std::array<std::vector<UMat>, 3> images_channels;
// Split channels of each input image
for (const UMat& image: images)
{
std::vector<UMat> image_channels;
image_channels.resize(3);
split(image, image_channels);
for (int i = 0; i < int(images_channels.size()); ++i)
images_channels[i].emplace_back(std::move(image_channels[i]));
}
// For each channel, feed the channel of each image in a GainCompensator
gains_.clear();
gains_.resize(images.size());
GainCompensator compensator(getNrFeeds());
compensator.setSimilarityThreshold(getSimilarityThreshold());
compensator.prepareSimilarityMask(corners, images);
for (int c = 0; c < 3; ++c)
{
const std::vector<UMat>& channels = images_channels[c];
compensator.feed(corners, channels, masks);
std::vector<double> gains = compensator.gains();
for (int i = 0; i < int(gains.size()); ++i)
gains_.at(i)[c] = gains[i];
}
}
void ChannelsCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
{
CV_INSTRUMENT_REGION();
multiply(image, gains_.at(index), image);
}
void ChannelsCompensator::getMatGains(std::vector<Mat>& umv)
{
umv.clear();
for (int i = 0; i < static_cast<int>(gains_.size()); ++i)
{
Mat m;
Mat(gains_[i]).copyTo(m);
umv.push_back(m);
}
}
void ChannelsCompensator::setMatGains(std::vector<Mat>& umv)
{
for (int i = 0; i < static_cast<int>(umv.size()); i++)
{
Scalar s;
umv[i].copyTo(s);
gains_.push_back(s);
}
}
template<class Compensator>
void BlocksCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
CV_Assert(corners.size() == images.size() && images.size() == masks.size());
const int num_images = static_cast<int>(images.size());
std::vector<Size> bl_per_imgs(num_images);
std::vector<Point> block_corners;
std::vector<UMat> block_images;
std::vector<std::pair<UMat,uchar> > block_masks;
// Construct blocks for gain compensator
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
Size bl_per_img((images[img_idx].cols + bl_width_ - 1) / bl_width_,
(images[img_idx].rows + bl_height_ - 1) / bl_height_);
int bl_width = (images[img_idx].cols + bl_per_img.width - 1) / bl_per_img.width;
int bl_height = (images[img_idx].rows + bl_per_img.height - 1) / bl_per_img.height;
bl_per_imgs[img_idx] = bl_per_img;
for (int by = 0; by < bl_per_img.height; ++by)
{
for (int bx = 0; bx < bl_per_img.width; ++bx)
{
Point bl_tl(bx * bl_width, by * bl_height);
Point bl_br(std::min(bl_tl.x + bl_width, images[img_idx].cols),
std::min(bl_tl.y + bl_height, images[img_idx].rows));
block_corners.push_back(corners[img_idx] + bl_tl);
block_images.push_back(images[img_idx](Rect(bl_tl, bl_br)));
block_masks.push_back(std::make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)),
masks[img_idx].second));
}
}
}
if (getUpdateGain() || int(gain_maps_.size()) != num_images)
{
Compensator compensator;
compensator.setNrFeeds(getNrFeeds());
compensator.setSimilarityThreshold(getSimilarityThreshold());
compensator.feed(block_corners, block_images, block_masks);
gain_maps_.clear();
gain_maps_.resize(num_images);
Mat_<float> ker(1, 3);
ker(0, 0) = 0.25; ker(0, 1) = 0.5; ker(0, 2) = 0.25;
int bl_idx = 0;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
{
Size bl_per_img = bl_per_imgs[img_idx];
UMat gain_map = getGainMap(compensator, bl_idx, bl_per_img);
bl_idx += bl_per_img.width*bl_per_img.height;
for (int i=0; i<nr_gain_filtering_iterations_; ++i)
{
UMat tmp;
sepFilter2D(gain_map, tmp, CV_32F, ker, ker);
swap(gain_map, tmp);
}
gain_maps_[img_idx] = gain_map;
}
}
}
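// Smoothing note (added commentary): the per-block gains form a coarse gain
// map per image, which is low-pass filtered with the separable binomial kernel
// [0.25, 0.5, 0.25] for nr_gain_filtering_iterations_ passes and later resized
// to the full image size in apply(), giving smooth spatially varying gains.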
UMat BlocksCompensator::getGainMap(const GainCompensator& compensator, int bl_idx, Size bl_per_img)
{
std::vector<double> gains = compensator.gains();
UMat u_gain_map(bl_per_img, CV_32F);
Mat_<float> gain_map = u_gain_map.getMat(ACCESS_WRITE);
for (int by = 0; by < bl_per_img.height; ++by)
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
gain_map(by, bx) = static_cast<float>(gains[bl_idx]);
return u_gain_map;
}
UMat BlocksCompensator::getGainMap(const ChannelsCompensator& compensator, int bl_idx, Size bl_per_img)
{
std::vector<Scalar> gains = compensator.gains();
UMat u_gain_map(bl_per_img, CV_32FC3);
Mat_<Vec3f> gain_map = u_gain_map.getMat(ACCESS_WRITE);
for (int by = 0; by < bl_per_img.height; ++by)
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
for (int c = 0; c < 3; ++c)
gain_map(by, bx)[c] = static_cast<float>(gains[bl_idx][c]);
return u_gain_map;
}
void BlocksCompensator::apply(int index, Point /*corner*/, InputOutputArray _image, InputArray /*mask*/)
{
CV_INSTRUMENT_REGION();
CV_Assert(_image.type() == CV_8UC3);
UMat u_gain_map;
if (gain_maps_.at(index).size() == _image.size())
u_gain_map = gain_maps_.at(index);
else
resize(gain_maps_.at(index), u_gain_map, _image.size(), 0, 0, INTER_LINEAR);
if (u_gain_map.channels() != 3)
{
std::vector<UMat> gains_channels;
gains_channels.push_back(u_gain_map);
gains_channels.push_back(u_gain_map);
gains_channels.push_back(u_gain_map);
merge(gains_channels, u_gain_map);
}
multiply(_image, u_gain_map, _image, 1, _image.type());
}
void BlocksCompensator::getMatGains(std::vector<Mat>& umv)
{
umv.clear();
for (int i = 0; i < static_cast<int>(gain_maps_.size()); ++i)
{
Mat m;
gain_maps_[i].copyTo(m);
umv.push_back(m);
}
}
void BlocksCompensator::setMatGains(std::vector<Mat>& umv)
{
for (int i = 0; i < static_cast<int>(umv.size()); i++)
{
UMat m;
umv[i].copyTo(m);
gain_maps_.push_back(m);
}
}
void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
BlocksCompensator::feed<GainCompensator>(corners, images, masks);
}
void BlocksChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks)
{
BlocksCompensator::feed<ChannelsCompensator>(corners, images, masks);
}
} // namespace detail
} // namespace cv

View File

@@ -0,0 +1,567 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/core/opencl/ocl_defs.hpp"
using namespace cv;
using namespace cv::detail;
using namespace cv::cuda;
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
namespace {
struct DistIdxPair
{
bool operator<(const DistIdxPair &other) const { return dist < other.dist; }
double dist;
int idx;
};
struct MatchPairsBody : ParallelLoopBody
{
MatchPairsBody(FeaturesMatcher &_matcher, const std::vector<ImageFeatures> &_features,
std::vector<MatchesInfo> &_pairwise_matches, std::vector<std::pair<int,int> > &_near_pairs)
: matcher(_matcher), features(_features),
pairwise_matches(_pairwise_matches), near_pairs(_near_pairs) {}
void operator ()(const Range &r) const CV_OVERRIDE
{
cv::RNG rng = cv::theRNG(); // save entry rng state
const int num_images = static_cast<int>(features.size());
for (int i = r.start; i < r.end; ++i)
{
cv::theRNG() = cv::RNG(rng.state + i); // force "stable" RNG seed for each processed pair
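// (Re-seeding from the entry state plus the pair index keeps any randomized
// steps inside the matcher -- e.g. RANSAC during homography estimation --
// reproducible regardless of how parallel_for_ schedules the pairs.)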
int from = near_pairs[i].first;
int to = near_pairs[i].second;
int pair_idx = from*num_images + to;
matcher(features[from], features[to], pairwise_matches[pair_idx]);
pairwise_matches[pair_idx].src_img_idx = from;
pairwise_matches[pair_idx].dst_img_idx = to;
size_t dual_pair_idx = to*num_images + from;
pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx];
pairwise_matches[dual_pair_idx].src_img_idx = to;
pairwise_matches[dual_pair_idx].dst_img_idx = from;
if (!pairwise_matches[pair_idx].H.empty())
pairwise_matches[dual_pair_idx].H = pairwise_matches[pair_idx].H.inv();
for (size_t j = 0; j < pairwise_matches[dual_pair_idx].matches.size(); ++j)
std::swap(pairwise_matches[dual_pair_idx].matches[j].queryIdx,
pairwise_matches[dual_pair_idx].matches[j].trainIdx);
LOG(".");
}
}
FeaturesMatcher &matcher;
const std::vector<ImageFeatures> &features;
std::vector<MatchesInfo> &pairwise_matches;
std::vector<std::pair<int,int> > &near_pairs;
private:
void operator =(const MatchPairsBody&);
};
//////////////////////////////////////////////////////////////////////////////
typedef std::set<std::pair<int,int> > MatchesSet;
// These two classes are intended to find feature matches only, not to
// estimate the homography
class CpuMatcher CV_FINAL : public FeaturesMatcher
{
public:
CpuMatcher(float match_conf) : FeaturesMatcher(true), match_conf_(match_conf) {}
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) CV_OVERRIDE;
private:
float match_conf_;
};
#ifdef HAVE_OPENCV_CUDAFEATURES2D
class GpuMatcher CV_FINAL : public FeaturesMatcher
{
public:
GpuMatcher(float match_conf) : match_conf_(match_conf) {}
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);
void collectGarbage();
private:
float match_conf_;
GpuMat descriptors1_, descriptors2_;
GpuMat train_idx_, distance_, all_dist_;
std::vector< std::vector<DMatch> > pair_matches;
};
#endif
void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
CV_INSTRUMENT_REGION();
CV_Assert(features1.descriptors.type() == features2.descriptors.type());
CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);
matches_info.matches.clear();
Ptr<cv::DescriptorMatcher> matcher;
#if 0 // TODO check this
if (ocl::isOpenCLActivated())
{
matcher = makePtr<BFMatcher>((int)NORM_L2);
}
else
#endif
{
Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
if (features2.descriptors.depth() == CV_8U)
{
indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
}
matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
}
std::vector< std::vector<DMatch> > pair_matches;
MatchesSet matches;
// Find 1->2 matches
matcher->knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
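// Lowe-style ratio test: keep the best match m0 only when it is clearly better
// than the runner-up m1, i.e. m0.distance < (1 - match_conf_) * m1.distance.
// With the conventional match_conf_ of 0.3 (the value is supplied by the
// caller) this keeps m0 when its distance is below 0.7x the second-best one.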
for (size_t i = 0; i < pair_matches.size(); ++i)
{
if (pair_matches[i].size() < 2)
continue;
const DMatch& m0 = pair_matches[i][0];
const DMatch& m1 = pair_matches[i][1];
if (m0.distance < (1.f - match_conf_) * m1.distance)
{
matches_info.matches.push_back(m0);
matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
}
}
LOG("\n1->2 matches: " << matches_info.matches.size() << endl);
// Find 2->1 matches
pair_matches.clear();
matcher->knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
for (size_t i = 0; i < pair_matches.size(); ++i)
{
if (pair_matches[i].size() < 2)
continue;
const DMatch& m0 = pair_matches[i][0];
const DMatch& m1 = pair_matches[i][1];
if (m0.distance < (1.f - match_conf_) * m1.distance)
if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
}
LOG("1->2 & 2->1 matches: " << matches_info.matches.size() << endl);
}
#ifdef HAVE_OPENCV_CUDAFEATURES2D
void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
CV_INSTRUMENT_REGION();
matches_info.matches.clear();
ensureSizeIsEnough(features1.descriptors.size(), features1.descriptors.type(), descriptors1_);
ensureSizeIsEnough(features2.descriptors.size(), features2.descriptors.type(), descriptors2_);
descriptors1_.upload(features1.descriptors);
descriptors2_.upload(features2.descriptors);
//TODO: NORM_L1 avoids matcher crashes for ORB features, but it is not strictly correct for them.
// The best choice for ORB features is NORM_HAMMING, but that is incorrect for SURF features.
// A more accurate fix here would make the norm type either a parameter of this
// method or a field of the class.
Ptr<cuda::DescriptorMatcher> matcher = cuda::DescriptorMatcher::createBFMatcher(NORM_L1);
MatchesSet matches;
// Find 1->2 matches
pair_matches.clear();
matcher->knnMatch(descriptors1_, descriptors2_, pair_matches, 2);
for (size_t i = 0; i < pair_matches.size(); ++i)
{
if (pair_matches[i].size() < 2)
continue;
const DMatch& m0 = pair_matches[i][0];
const DMatch& m1 = pair_matches[i][1];
if (m0.distance < (1.f - match_conf_) * m1.distance)
{
matches_info.matches.push_back(m0);
matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
}
}
// Find 2->1 matches
pair_matches.clear();
matcher->knnMatch(descriptors2_, descriptors1_, pair_matches, 2);
for (size_t i = 0; i < pair_matches.size(); ++i)
{
if (pair_matches[i].size() < 2)
continue;
const DMatch& m0 = pair_matches[i][0];
const DMatch& m1 = pair_matches[i][1];
if (m0.distance < (1.f - match_conf_) * m1.distance)
if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
}
}
void GpuMatcher::collectGarbage()
{
descriptors1_.release();
descriptors2_.release();
train_idx_.release();
distance_.release();
all_dist_.release();
std::vector< std::vector<DMatch> >().swap(pair_matches);
}
#endif
} // namespace
namespace cv {
namespace detail {
void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArrayOfArrays images,
std::vector<ImageFeatures> &features,
InputArrayOfArrays masks)
{
// compute all features
std::vector<std::vector<KeyPoint>> keypoints;
std::vector<UMat> descriptors;
// TODO: replace with one call to the new overload of detectAndCompute
featuresFinder->detect(images, keypoints, masks);
featuresFinder->compute(images, keypoints, descriptors);
// store to ImageFeatures
size_t count = images.total();
features.resize(count);
CV_Assert(count == keypoints.size() && count == descriptors.size());
for (size_t i = 0; i < count; ++i)
{
features[i].img_size = images.size(int(i));
features[i].keypoints = std::move(keypoints[i]);
features[i].descriptors = std::move(descriptors[i]);
}
}
void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArray image,
ImageFeatures &features,
InputArray mask)
{
features.img_size = image.size();
featuresFinder->detectAndCompute(image, mask, features.keypoints, features.descriptors);
}
//////////////////////////////////////////////////////////////////////////////
MatchesInfo::MatchesInfo() : src_img_idx(-1), dst_img_idx(-1), num_inliers(0), confidence(0) {}
MatchesInfo::MatchesInfo(const MatchesInfo &other) { *this = other; }
MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
{
src_img_idx = other.src_img_idx;
dst_img_idx = other.dst_img_idx;
matches = other.matches;
inliers_mask = other.inliers_mask;
num_inliers = other.num_inliers;
H = other.H.clone();
confidence = other.confidence;
return *this;
}
//////////////////////////////////////////////////////////////////////////////
void FeaturesMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const UMat &mask)
{
const int num_images = static_cast<int>(features.size());
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows == num_images));
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
if (mask_.empty())
mask_ = Mat::ones(num_images, num_images, CV_8U);
std::vector<std::pair<int,int> > near_pairs;
for (int i = 0; i < num_images - 1; ++i)
for (int j = i + 1; j < num_images; ++j)
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
near_pairs.push_back(std::make_pair(i, j));
pairwise_matches.clear(); // clear history values
pairwise_matches.resize(num_images * num_images);
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
if (is_thread_safe_)
parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
else
body(Range(0, static_cast<int>(near_pairs.size())));
LOGLN_CHAT("");
}
//////////////////////////////////////////////////////////////////////////////
BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
CV_UNUSED(try_use_gpu);
#ifdef HAVE_OPENCV_CUDAFEATURES2D
if (try_use_gpu && getCudaEnabledDeviceCount() > 0)
{
impl_ = makePtr<GpuMatcher>(match_conf);
}
else
#endif
{
impl_ = makePtr<CpuMatcher>(match_conf);
}
is_thread_safe_ = impl_->isThreadSafe();
num_matches_thresh1_ = num_matches_thresh1;
num_matches_thresh2_ = num_matches_thresh2;
}
Ptr<BestOf2NearestMatcher> BestOf2NearestMatcher::create(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
return makePtr<BestOf2NearestMatcher>(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2);
}
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo &matches_info)
{
CV_INSTRUMENT_REGION();
(*impl_)(features1, features2, matches_info);
// Check if it makes sense to find homography
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
return;
// Construct point-point correspondences for homography estimation
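// Keypoint coordinates are shifted to be relative to the image centers: this
// improves the numerical conditioning of the homography fit and matches the
// pipeline's convention of initializing the principal point at the image
// center, so H relates center-based coordinates of the two images.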
Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
for (size_t i = 0; i < matches_info.matches.size(); ++i)
{
const DMatch& m = matches_info.matches[i];
Point2f p = features1.keypoints[m.queryIdx].pt;
p.x -= features1.img_size.width * 0.5f;
p.y -= features1.img_size.height * 0.5f;
src_points.at<Point2f>(0, static_cast<int>(i)) = p;
p = features2.keypoints[m.trainIdx].pt;
p.x -= features2.img_size.width * 0.5f;
p.y -= features2.img_size.height * 0.5f;
dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
}
// Find pair-wise motion
matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, RANSAC);
if (matches_info.H.empty() || std::abs(determinant(matches_info.H)) < std::numeric_limits<double>::epsilon())
return;
// Find number of inliers
matches_info.num_inliers = 0;
for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
if (matches_info.inliers_mask[i])
matches_info.num_inliers++;
// These coefficients are from the paper M. Brown and D. Lowe, "Automatic
// Panoramic Image Stitching using Invariant Features"
matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());
// Set zero confidence to remove matches between too close images, as they don't provide
// additional information anyway. The threshold was set experimentally.
matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
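// Worked example: 100 matches with 60 RANSAC inliers give a confidence of
// 60 / (8 + 0.3 * 100) ~= 1.58, which clears the default panorama confidence
// threshold of 1. Confidence can only exceed 3 when more than ~240 matches are
// nearly all inliers, which in practice means two near-identical frames.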
// Check if we should try to refine motion
if (matches_info.num_inliers < num_matches_thresh2_)
return;
// Construct point-point correspondences for inliers only
src_points.create(1, matches_info.num_inliers, CV_32FC2);
dst_points.create(1, matches_info.num_inliers, CV_32FC2);
int inlier_idx = 0;
for (size_t i = 0; i < matches_info.matches.size(); ++i)
{
if (!matches_info.inliers_mask[i])
continue;
const DMatch& m = matches_info.matches[i];
Point2f p = features1.keypoints[m.queryIdx].pt;
p.x -= features1.img_size.width * 0.5f;
p.y -= features1.img_size.height * 0.5f;
src_points.at<Point2f>(0, inlier_idx) = p;
p = features2.keypoints[m.trainIdx].pt;
p.x -= features2.img_size.width * 0.5f;
p.y -= features2.img_size.height * 0.5f;
dst_points.at<Point2f>(0, inlier_idx) = p;
inlier_idx++;
}
// Rerun motion estimation on inliers only
matches_info.H = findHomography(src_points, dst_points, RANSAC);
}
void BestOf2NearestMatcher::collectGarbage()
{
impl_->collectGarbage();
}
BestOf2NearestRangeMatcher::BestOf2NearestRangeMatcher(int range_width, bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2): BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2)
{
range_width_ = range_width;
}
void BestOf2NearestRangeMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const UMat &mask)
{
const int num_images = static_cast<int>(features.size());
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows == num_images));
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
if (mask_.empty())
mask_ = Mat::ones(num_images, num_images, CV_8U);
std::vector<std::pair<int,int> > near_pairs;
for (int i = 0; i < num_images - 1; ++i)
for (int j = i + 1; j < std::min(num_images, i + range_width_); ++j)
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
near_pairs.push_back(std::make_pair(i, j));
pairwise_matches.resize(num_images * num_images);
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
if (is_thread_safe_)
parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
else
body(Range(0, static_cast<int>(near_pairs.size())));
LOGLN_CHAT("");
}
void AffineBestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo &matches_info)
{
(*impl_)(features1, features2, matches_info);
// Check if it makes sense to find transform
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
return;
// Construct point-point correspondences for transform estimation
Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
for (size_t i = 0; i < matches_info.matches.size(); ++i)
{
const cv::DMatch &m = matches_info.matches[i];
src_points.at<Point2f>(0, static_cast<int>(i)) = features1.keypoints[m.queryIdx].pt;
dst_points.at<Point2f>(0, static_cast<int>(i)) = features2.keypoints[m.trainIdx].pt;
}
// Find pair-wise motion
if (full_affine_)
matches_info.H = estimateAffine2D(src_points, dst_points, matches_info.inliers_mask);
else
matches_info.H = estimateAffinePartial2D(src_points, dst_points, matches_info.inliers_mask);
if (matches_info.H.empty()) {
// could not find transformation
matches_info.confidence = 0;
matches_info.num_inliers = 0;
return;
}
// Find number of inliers
matches_info.num_inliers = 0;
for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
if (matches_info.inliers_mask[i])
matches_info.num_inliers++;
// These coefficients are from the paper M. Brown and D. Lowe, "Automatic
// Panoramic Image Stitching using Invariant Features"
matches_info.confidence =
matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());
/* should we remove matches between too close images? */
// matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
// extend H to represent linear transformation in homogeneous coordinates
matches_info.H.push_back(Mat::zeros(1, 3, CV_64F));
matches_info.H.at<double>(2, 2) = 1;
}
} // namespace detail
} // namespace cv
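// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the library). It assumes
// `images` is a std::vector<cv::UMat> of input frames and uses 0.3f as the
// match confidence:
//
//   std::vector<cv::detail::ImageFeatures> features;
//   cv::detail::computeImageFeatures(cv::ORB::create(), images, features, cv::noArray());
//   std::vector<cv::detail::MatchesInfo> pairwise;
//   cv::detail::BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f);
//   matcher(features, pairwise, cv::UMat());
//   matcher.collectGarbage();
// ---------------------------------------------------------------------------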

File diff suppressed because it is too large


@@ -0,0 +1,298 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
//
// Common preprocessors macro
//
//
// TODO: Move this common code into "header" file
//
#ifndef NL // New Line: for preprocessor debugging
#define NL
#endif
#define REF(x) x
#define __CAT(x, y) x##y
#define CAT(x, y) __CAT(x, y)
//
// All matrices come with this description ("name" is the name of the matrix):
// * name_CN - number of channels (1,2,3,4)
// * name_DEPTH - numeric value of CV_MAT_DEPTH(type). See CV_8U, CV_32S, etc macro below.
//
// Currently we also pass these attributes (to reduce this macro block):
// * name_T - datatype (int, float, uchar4, float4)
// * name_T1 - datatype for one channel (int, float, uchar).
//       It is equal to the result of the "T1(name_T)" macro
// * name_TSIZE - CV_ELEM_SIZE(type).
//       We can't use sizeof(name_T) here, because sizeof(short3) is usually equal to 8, while CV_ELEM_SIZE(CV_16SC3) is 6.
// * name_T1SIZE - CV_ELEM_SIZE1(type)
//
//
// Usage sample:
//
// #define workType TYPE(float, src_CN)
// #define convertToWorkType CONVERT_TO(workType)
// #define convertWorkTypeToDstType CONVERT(workType, dst_T)
//
// __kernel void kernelFn(DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(dst))
// {
// const int x = get_global_id(0);
// const int y = get_global_id(1);
//
// if (x < srcWidth && y < srcHeight)
// {
// int src_byteOffset = MAT_BYTE_OFFSET(src, x, y);
// int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y);
// workType value = convertToWorkType(LOAD_MAT_AT(src, src_byteOffset));
//
// ... value processing ...
//
// STORE_MAT_AT(dst, dst_byteOffset, convertWorkTypeToDstType(value));
// }
// }
//
#define DECLARE_MAT_ARG(name) \
__global uchar* restrict name ## Ptr, \
int name ## StepBytes, \
int name ## Offset, \
int name ## Height, \
int name ## Width NL
#define MAT_BYTE_OFFSET(name, x, y) mad24((y)/* + name ## OffsetY*/, name ## StepBytes, ((x)/* + name ## OffsetX*/) * (int)(name ## _TSIZE) + name ## Offset)
#define MAT_RELATIVE_BYTE_OFFSET(name, x, y) mad24(y, name ## StepBytes, (x) * (int)(name ## _TSIZE))
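// Example: for a single-channel float matrix (name_TSIZE == 4) the absolute
// byte offset of element (x, y) is y * nameStepBytes + x * 4 + nameOffset.
// mad24(a, b, c) is the OpenCL 24-bit multiply-add a * b + c, which is exact
// for image-sized indices and typically faster than a full 32-bit multiply.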
#define __LOAD_MAT_AT(name, byteOffset) *((const __global name ## _T*)(name ## Ptr + (byteOffset)))
#define __vload_CN__(name_cn) vload ## name_cn
#define __vload_CN_(name_cn) __vload_CN__(name_cn)
#define __vload_CN(name) __vload_CN_(name ## _CN)
#define __LOAD_MAT_AT_vload(name, byteOffset) __vload_CN(name)(0, ((const __global name ## _T1*)(name ## Ptr + (byteOffset))))
#define __LOAD_MAT_AT_1 __LOAD_MAT_AT
#define __LOAD_MAT_AT_2 __LOAD_MAT_AT
#define __LOAD_MAT_AT_3 __LOAD_MAT_AT_vload
#define __LOAD_MAT_AT_4 __LOAD_MAT_AT
#define __LOAD_MAT_AT_CN__(name_cn) __LOAD_MAT_AT_ ## name_cn
#define __LOAD_MAT_AT_CN_(name_cn) __LOAD_MAT_AT_CN__(name_cn)
#define __LOAD_MAT_AT_CN(name) __LOAD_MAT_AT_CN_(name ## _CN)
#define LOAD_MAT_AT(name, byteOffset) __LOAD_MAT_AT_CN(name)(name, byteOffset)
#define __STORE_MAT_AT(name, byteOffset, v) *((__global name ## _T*)(name ## Ptr + (byteOffset))) = v
#define __vstore_CN__(name_cn) vstore ## name_cn
#define __vstore_CN_(name_cn) __vstore_CN__(name_cn)
#define __vstore_CN(name) __vstore_CN_(name ## _CN)
#define __STORE_MAT_AT_vstore(name, byteOffset, v) __vstore_CN(name)(v, 0, ((__global name ## _T1*)(name ## Ptr + (byteOffset))))
#define __STORE_MAT_AT_1 __STORE_MAT_AT
#define __STORE_MAT_AT_2 __STORE_MAT_AT
#define __STORE_MAT_AT_3 __STORE_MAT_AT_vstore
#define __STORE_MAT_AT_4 __STORE_MAT_AT
#define __STORE_MAT_AT_CN__(name_cn) __STORE_MAT_AT_ ## name_cn
#define __STORE_MAT_AT_CN_(name_cn) __STORE_MAT_AT_CN__(name_cn)
#define __STORE_MAT_AT_CN(name) __STORE_MAT_AT_CN_(name ## _CN)
#define STORE_MAT_AT(name, byteOffset, v) __STORE_MAT_AT_CN(name)(name, byteOffset, v)
#define T1_uchar uchar
#define T1_uchar2 uchar
#define T1_uchar3 uchar
#define T1_uchar4 uchar
#define T1_char char
#define T1_char2 char
#define T1_char3 char
#define T1_char4 char
#define T1_ushort ushort
#define T1_ushort2 ushort
#define T1_ushort3 ushort
#define T1_ushort4 ushort
#define T1_short short
#define T1_short2 short
#define T1_short3 short
#define T1_short4 short
#define T1_int int
#define T1_int2 int
#define T1_int3 int
#define T1_int4 int
#define T1_float float
#define T1_float2 float
#define T1_float3 float
#define T1_float4 float
#define T1_double double
#define T1_double2 double
#define T1_double3 double
#define T1_double4 double
#define T1(type) REF(CAT(T1_, REF(type)))
#define uchar1 uchar
#define char1 char
#define short1 short
#define ushort1 ushort
#define int1 int
#define float1 float
#define double1 double
#define TYPE(type, cn) REF(CAT(REF(type), REF(cn)))
#define __CONVERT_MODE_uchar_uchar __NO_CONVERT
#define __CONVERT_MODE_uchar_char __CONVERT_sat
#define __CONVERT_MODE_uchar_ushort __CONVERT
#define __CONVERT_MODE_uchar_short __CONVERT
#define __CONVERT_MODE_uchar_int __CONVERT
#define __CONVERT_MODE_uchar_float __CONVERT
#define __CONVERT_MODE_uchar_double __CONVERT
#define __CONVERT_MODE_char_uchar __CONVERT_sat
#define __CONVERT_MODE_char_char __NO_CONVERT
#define __CONVERT_MODE_char_ushort __CONVERT_sat
#define __CONVERT_MODE_char_short __CONVERT
#define __CONVERT_MODE_char_int __CONVERT
#define __CONVERT_MODE_char_float __CONVERT
#define __CONVERT_MODE_char_double __CONVERT
#define __CONVERT_MODE_ushort_uchar __CONVERT_sat
#define __CONVERT_MODE_ushort_char __CONVERT_sat
#define __CONVERT_MODE_ushort_ushort __NO_CONVERT
#define __CONVERT_MODE_ushort_short __CONVERT_sat
#define __CONVERT_MODE_ushort_int __CONVERT
#define __CONVERT_MODE_ushort_float __CONVERT
#define __CONVERT_MODE_ushort_double __CONVERT
#define __CONVERT_MODE_short_uchar __CONVERT_sat
#define __CONVERT_MODE_short_char __CONVERT_sat
#define __CONVERT_MODE_short_ushort __CONVERT_sat
#define __CONVERT_MODE_short_short __NO_CONVERT
#define __CONVERT_MODE_short_int __CONVERT
#define __CONVERT_MODE_short_float __CONVERT
#define __CONVERT_MODE_short_double __CONVERT
#define __CONVERT_MODE_int_uchar __CONVERT_sat
#define __CONVERT_MODE_int_char __CONVERT_sat
#define __CONVERT_MODE_int_ushort __CONVERT_sat
#define __CONVERT_MODE_int_short __CONVERT_sat
#define __CONVERT_MODE_int_int __NO_CONVERT
#define __CONVERT_MODE_int_float __CONVERT
#define __CONVERT_MODE_int_double __CONVERT
#define __CONVERT_MODE_float_uchar __CONVERT_sat_rte
#define __CONVERT_MODE_float_char __CONVERT_sat_rte
#define __CONVERT_MODE_float_ushort __CONVERT_sat_rte
#define __CONVERT_MODE_float_short __CONVERT_sat_rte
#define __CONVERT_MODE_float_int __CONVERT_rte
#define __CONVERT_MODE_float_float __NO_CONVERT
#define __CONVERT_MODE_float_double __CONVERT
#define __CONVERT_MODE_double_uchar __CONVERT_sat_rte
#define __CONVERT_MODE_double_char __CONVERT_sat_rte
#define __CONVERT_MODE_double_ushort __CONVERT_sat_rte
#define __CONVERT_MODE_double_short __CONVERT_sat_rte
#define __CONVERT_MODE_double_int __CONVERT_rte
#define __CONVERT_MODE_double_float __CONVERT
#define __CONVERT_MODE_double_double __NO_CONVERT
#define __CONVERT_MODE(srcType, dstType) CAT(__CONVERT_MODE_, CAT(REF(T1(srcType)), CAT(_, REF(T1(dstType)))))
#define __ROUND_MODE__NO_CONVERT
#define __ROUND_MODE__CONVERT // nothing
#define __ROUND_MODE__CONVERT_rte _rte
#define __ROUND_MODE__CONVERT_sat _sat
#define __ROUND_MODE__CONVERT_sat_rte _sat_rte
#define ROUND_MODE(srcType, dstType) CAT(__ROUND_MODE_, __CONVERT_MODE(srcType, dstType))
#define __CONVERT_ROUND(dstType, roundMode) CAT(CAT(convert_, REF(dstType)), roundMode)
#define __NO_CONVERT(dstType) // nothing
#define __CONVERT(dstType) __CONVERT_ROUND(dstType,)
#define __CONVERT_rte(dstType) __CONVERT_ROUND(dstType,_rte)
#define __CONVERT_sat(dstType) __CONVERT_ROUND(dstType,_sat)
#define __CONVERT_sat_rte(dstType) __CONVERT_ROUND(dstType,_sat_rte)
#define CONVERT(srcType, dstType) REF(__CONVERT_MODE(srcType,dstType))(dstType)
#define CONVERT_TO(dstType) __CONVERT_ROUND(dstType,)
// OpenCV depths
#define CV_8U 0
#define CV_8S 1
#define CV_16U 2
#define CV_16S 3
#define CV_32S 4
#define CV_32F 5
#define CV_64F 6
//
// End of common preprocessors macro
//
#if defined(DEFINE_feed)
#define workType TYPE(weight_T1, src_CN)
#if src_DEPTH == 3 && src_CN == 3
#define convertSrcToWorkType convert_float3
#else
#define convertSrcToWorkType CONVERT_TO(workType)
#endif
#if dst_DEPTH == 3 && dst_CN == 3
#define convertToDstType convert_short3
#else
#define convertToDstType CONVERT_TO(dst_T) // sat_rte provides incompatible results with CPU path
#endif
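// feed() accumulates one weighted image into the blend buffers:
//   dst       += convertToDstType(src * w)
//   dstWeight += w
// A later normalizeUsingWeightMap() pass (below) divides the accumulated sum
// by the accumulated weight to produce the blended output.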
__kernel void feed(
DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(weight),
DECLARE_MAT_ARG(dst), DECLARE_MAT_ARG(dstWeight)
)
{
const int x = get_global_id(0);
const int y = get_global_id(1);
if (x < srcWidth && y < srcHeight)
{
int src_byteOffset = MAT_BYTE_OFFSET(src, x, y);
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y);
int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y);
int dstWeight_byteOffset = MAT_BYTE_OFFSET(dstWeight, x, y);
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset);
workType src_value = convertSrcToWorkType(LOAD_MAT_AT(src, src_byteOffset));
STORE_MAT_AT(dst, dst_byteOffset, LOAD_MAT_AT(dst, dst_byteOffset) + convertToDstType(src_value * w));
STORE_MAT_AT(dstWeight, dstWeight_byteOffset, LOAD_MAT_AT(dstWeight, dstWeight_byteOffset) + w);
}
}
#endif
#if defined(DEFINE_normalizeUsingWeightMap)
#if mat_DEPTH == 3 && mat_CN == 3
#define workType float3
#define convertSrcToWorkType convert_float3
#define convertToDstType convert_short3
#else
#define workType TYPE(weight_T1, mat_CN)
#define convertSrcToWorkType CONVERT_TO(workType)
#define convertToDstType CONVERT_TO(mat_T) // sat_rte provides incompatible results with CPU path
#endif
#if weight_DEPTH >= CV_32F
#define WEIGHT_EPS 1e-5f
#else
#define WEIGHT_EPS 0
#endif
__kernel void normalizeUsingWeightMap(
DECLARE_MAT_ARG(mat), DECLARE_MAT_ARG(weight)
)
{
const int x = get_global_id(0);
const int y = get_global_id(1);
if (x < matWidth && y < matHeight)
{
int mat_byteOffset = MAT_BYTE_OFFSET(mat, x, y);
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y);
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset);
workType value = convertSrcToWorkType(LOAD_MAT_AT(mat, mat_byteOffset));
value = value / (w + WEIGHT_EPS);
STORE_MAT_AT(mat, mat_byteOffset, convertToDstType(value));
}
}
#endif


@@ -0,0 +1,169 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
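// The kernels below build the per-pixel inverse maps consumed by cv::remap():
// for every destination pixel (tl_u + du, tl_v + dv) of the warped image they
// compute the corresponding source-image coordinate through ck_rinv (the
// inverse of K * R, stored as a row-major 3x3), writing (-1, -1) where the
// point cannot be back-projected.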
__kernel void buildWarpPlaneMaps(__global uchar * xmapptr, int xmap_step, int xmap_offset,
__global uchar * ymapptr, int ymap_step, int ymap_offset, int rows, int cols,
__constant float * ck_rinv, __constant float * ct,
int tl_u, int tl_v, float scale, int rowsPerWI)
{
int du = get_global_id(0);
int dv0 = get_global_id(1) * rowsPerWI;
if (du < cols)
{
int xmap_index = mad24(dv0, xmap_step, mad24(du, (int)sizeof(float), xmap_offset));
int ymap_index = mad24(dv0, ymap_step, mad24(du, (int)sizeof(float), ymap_offset));
float u = tl_u + du;
float x_ = fma(u, scale, -ct[0]);
float ct1 = 1 - ct[2];
for (int dv = dv0, dv1 = min(rows, dv0 + rowsPerWI); dv < dv1; ++dv, xmap_index += xmap_step,
ymap_index += ymap_step)
{
__global float * xmap = (__global float *)(xmapptr + xmap_index);
__global float * ymap = (__global float *)(ymapptr + ymap_index);
float v = tl_v + dv;
float y_ = fma(v, scale, -ct[1]);
float x = fma(ck_rinv[0], x_, fma(ck_rinv[1], y_, ck_rinv[2] * ct1));
float y = fma(ck_rinv[3], x_, fma(ck_rinv[4], y_, ck_rinv[5] * ct1));
float z = fma(ck_rinv[6], x_, fma(ck_rinv[7], y_, ck_rinv[8] * ct1));
if (z != 0)
x /= z, y /= z;
else
x = y = -1;
xmap[0] = x;
ymap[0] = y;
}
}
}
__kernel void buildWarpCylindricalMaps(__global uchar * xmapptr, int xmap_step, int xmap_offset,
__global uchar * ymapptr, int ymap_step, int ymap_offset, int rows, int cols,
__constant float * ck_rinv, int tl_u, int tl_v, float scale, int rowsPerWI)
{
int du = get_global_id(0);
int dv0 = get_global_id(1) * rowsPerWI;
if (du < cols)
{
int xmap_index = mad24(dv0, xmap_step, mad24(du, (int)sizeof(float), xmap_offset));
int ymap_index = mad24(dv0, ymap_step, mad24(du, (int)sizeof(float), ymap_offset));
float u = (tl_u + du) * scale;
float x_, z_;
x_ = sincos(u, &z_);
for (int dv = dv0, dv1 = min(rows, dv0 + rowsPerWI); dv < dv1; ++dv, xmap_index += xmap_step,
ymap_index += ymap_step)
{
__global float * xmap = (__global float *)(xmapptr + xmap_index);
__global float * ymap = (__global float *)(ymapptr + ymap_index);
float y_ = (tl_v + dv) * scale;
float x, y, z;
x = fma(ck_rinv[0], x_, fma(ck_rinv[1], y_, ck_rinv[2] * z_));
y = fma(ck_rinv[3], x_, fma(ck_rinv[4], y_, ck_rinv[5] * z_));
z = fma(ck_rinv[6], x_, fma(ck_rinv[7], y_, ck_rinv[8] * z_));
if (z > 0)
x /= z, y /= z;
else
x = y = -1;
xmap[0] = x;
ymap[0] = y;
}
}
}
__kernel void buildWarpSphericalMaps(__global uchar * xmapptr, int xmap_step, int xmap_offset,
__global uchar * ymapptr, int ymap_step, int ymap_offset, int rows, int cols,
__constant float * ck_rinv, int tl_u, int tl_v, float scale, int rowsPerWI)
{
int du = get_global_id(0);
int dv0 = get_global_id(1) * rowsPerWI;
if (du < cols)
{
int xmap_index = mad24(dv0, xmap_step, mad24(du, (int)sizeof(float), xmap_offset));
int ymap_index = mad24(dv0, ymap_step, mad24(du, (int)sizeof(float), ymap_offset));
float u = (tl_u + du) * scale;
float cosu, sinu = sincos(u, &cosu);
for (int dv = dv0, dv1 = min(rows, dv0 + rowsPerWI); dv < dv1; ++dv, xmap_index += xmap_step,
ymap_index += ymap_step)
{
__global float * xmap = (__global float *)(xmapptr + xmap_index);
__global float * ymap = (__global float *)(ymapptr + ymap_index);
float v = (tl_v + dv) * scale;
float cosv, sinv = sincos(v, &cosv);
float x_ = sinv * sinu;
float y_ = -cosv;
float z_ = sinv * cosu;
float x, y, z;
x = fma(ck_rinv[0], x_, fma(ck_rinv[1], y_, ck_rinv[2] * z_));
y = fma(ck_rinv[3], x_, fma(ck_rinv[4], y_, ck_rinv[5] * z_));
z = fma(ck_rinv[6], x_, fma(ck_rinv[7], y_, ck_rinv[8] * z_));
if (z > 0)
x /= z, y /= z;
else
x = y = -1;
xmap[0] = x;
ymap[0] = y;
}
}
}


@@ -0,0 +1,94 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_STITCHING_PRECOMP_H__
#define __OPENCV_STITCHING_PRECOMP_H__
#include "opencv2/opencv_modules.hpp"
#include <vector>
#include <algorithm>
#include <utility>
#include <set>
#include <functional>
#include <sstream>
#include <iostream>
#include <cmath>
#include "opencv2/core.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/stitching.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/timelapsers.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAWARPING
# include "opencv2/cudawarping.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFEATURES2D
# include "opencv2/cudafeatures2d.hpp"
#endif
#ifdef HAVE_OPENCV_CUDALEGACY
# include "opencv2/cudalegacy.hpp"
#endif
#include "opencv2/core/private.hpp"
#include "util_log.hpp"
#endif

File diff suppressed because it is too large


@@ -0,0 +1,654 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv {
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900/*MSVS 2015*/)
// Stitcher::ORIG_RESOL is initialized in stitching.hpp.
#else
const double Stitcher::ORIG_RESOL = -1.0;
#endif
Ptr<Stitcher> Stitcher::create(Mode mode)
{
Ptr<Stitcher> stitcher = makePtr<Stitcher>();
stitcher->setRegistrationResol(0.6);
stitcher->setSeamEstimationResol(0.1);
stitcher->setCompositingResol(ORIG_RESOL);
stitcher->setPanoConfidenceThresh(1);
stitcher->setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
stitcher->setBlender(makePtr<detail::MultiBandBlender>(false));
stitcher->setFeaturesFinder(ORB::create());
stitcher->setInterpolationFlags(INTER_LINEAR);
stitcher->work_scale_ = 1;
stitcher->seam_scale_ = 1;
stitcher->seam_work_aspect_ = 1;
stitcher->warped_image_scale_ = 1;
switch (mode)
{
case PANORAMA: // PANORAMA is the default
// mostly already set up
stitcher->setEstimator(makePtr<detail::HomographyBasedEstimator>());
stitcher->setWaveCorrection(true);
stitcher->setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
stitcher->setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(false));
stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
break;
case SCANS:
stitcher->setEstimator(makePtr<detail::AffineBasedEstimator>());
stitcher->setWaveCorrection(false);
stitcher->setFeaturesMatcher(makePtr<detail::AffineBestOf2NearestMatcher>(false, false));
stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterAffinePartial>());
stitcher->setWarper(makePtr<AffineWarper>());
stitcher->setExposureCompensator(makePtr<detail::NoExposureCompensator>());
break;
default:
CV_Error(Error::StsBadArg, "Invalid stitching mode. Must be one of Stitcher::Mode");
break;
}
return stitcher;
}
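// Usage sketch (illustrative, not part of this file), assuming `imgs` is a
// std::vector<cv::Mat> of overlapping photographs:
//
//   cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
//   cv::Mat pano;
//   cv::Stitcher::Status status = stitcher->stitch(imgs, pano);
//   if (status != cv::Stitcher::OK) { /* e.g. ERR_NEED_MORE_IMGS */ }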
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks)
{
CV_INSTRUMENT_REGION();
images.getUMatVector(imgs_);
masks.getUMatVector(masks_);
Status status;
if ((status = matchImages()) != OK)
return status;
if ((status = estimateCameraParams()) != OK)
return status;
return OK;
}
Stitcher::Status Stitcher::composePanorama(OutputArray pano)
{
CV_INSTRUMENT_REGION();
return composePanorama(std::vector<UMat>(), pano);
}
Stitcher::Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArray pano)
{
CV_INSTRUMENT_REGION();
LOGLN("Warping images (auxiliary)... ");
std::vector<UMat> imgs;
images.getUMatVector(imgs);
if (!imgs.empty())
{
CV_Assert(imgs.size() == imgs_.size());
UMat img;
seam_est_imgs_.resize(imgs.size());
for (size_t i = 0; i < imgs.size(); ++i)
{
imgs_[i] = imgs[i];
resize(imgs[i], img, Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
seam_est_imgs_[i] = img.clone();
}
std::vector<UMat> seam_est_imgs_subset;
std::vector<UMat> imgs_subset;
for (size_t i = 0; i < indices_.size(); ++i)
{
imgs_subset.push_back(imgs_[indices_[i]]);
seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
}
seam_est_imgs_ = seam_est_imgs_subset;
imgs_ = imgs_subset;
}
UMat pano_;
#if ENABLE_LOG
int64 t = getTickCount();
#endif
std::vector<Point> corners(imgs_.size());
std::vector<UMat> masks_warped(imgs_.size());
std::vector<UMat> images_warped(imgs_.size());
std::vector<Size> sizes(imgs_.size());
std::vector<UMat> masks(imgs_.size());
// Prepare image masks
for (size_t i = 0; i < imgs_.size(); ++i)
{
masks[i].create(seam_est_imgs_[i].size(), CV_8U);
masks[i].setTo(Scalar::all(255));
}
// Warp images and their masks
Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
for (size_t i = 0; i < imgs_.size(); ++i)
{
Mat_<float> K;
cameras_[i].K().convertTo(K, CV_32F);
K(0,0) *= (float)seam_work_aspect_;
K(0,2) *= (float)seam_work_aspect_;
K(1,1) *= (float)seam_work_aspect_;
K(1,2) *= (float)seam_work_aspect_;
corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, interp_flags_, BORDER_REFLECT, images_warped[i]);
sizes[i] = images_warped[i].size();
w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
// Compensate exposure before finding seams
exposure_comp_->feed(corners, images_warped, masks_warped);
for (size_t i = 0; i < imgs_.size(); ++i)
exposure_comp_->apply(int(i), corners[i], images_warped[i], masks_warped[i]);
// Find seams
std::vector<UMat> images_warped_f(imgs_.size());
for (size_t i = 0; i < imgs_.size(); ++i)
images_warped[i].convertTo(images_warped_f[i], CV_32F);
seam_finder_->find(images_warped_f, corners, masks_warped);
// Release unused memory
seam_est_imgs_.clear();
images_warped.clear();
images_warped_f.clear();
masks.clear();
LOGLN("Compositing...");
#if ENABLE_LOG
t = getTickCount();
#endif
UMat img_warped, img_warped_s;
UMat dilated_mask, seam_mask, mask, mask_warped;
//double compose_seam_aspect = 1;
double compose_work_aspect = 1;
bool is_blender_prepared = false;
double compose_scale = 1;
bool is_compose_scale_set = false;
std::vector<detail::CameraParams> cameras_scaled(cameras_);
UMat full_img, img;
for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
{
LOGLN("Compositing image #" << indices_[img_idx] + 1);
#if ENABLE_LOG
int64 compositing_t = getTickCount();
#endif
// Read image and resize it if necessary
full_img = imgs_[img_idx];
if (!is_compose_scale_set)
{
if (compose_resol_ > 0)
compose_scale = std::min(1.0, std::sqrt(compose_resol_ * 1e6 / full_img.size().area()));
is_compose_scale_set = true;
// Compute relative scales
//compose_seam_aspect = compose_scale / seam_scale_;
compose_work_aspect = compose_scale / work_scale_;
// Update warped image scale
float warp_scale = static_cast<float>(warped_image_scale_ * compose_work_aspect);
w = warper_->create(warp_scale);
// Update corners and sizes
for (size_t i = 0; i < imgs_.size(); ++i)
{
// Update intrinsics
cameras_scaled[i].ppx *= compose_work_aspect;
cameras_scaled[i].ppy *= compose_work_aspect;
cameras_scaled[i].focal *= compose_work_aspect;
// Update corner and size
Size sz = full_img_sizes_[i];
if (std::abs(compose_scale - 1) > 1e-1)
{
sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
}
Mat K;
cameras_scaled[i].K().convertTo(K, CV_32F);
Rect roi = w->warpRoi(sz, K, cameras_scaled[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
}
if (std::abs(compose_scale - 1) > 1e-1)
{
#if ENABLE_LOG
int64 resize_t = getTickCount();
#endif
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
LOGLN(" resize time: " << ((getTickCount() - resize_t) / getTickFrequency()) << " sec");
}
else
img = full_img;
full_img.release();
Size img_size = img.size();
LOGLN(" after resize time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
Mat K;
cameras_scaled[img_idx].K().convertTo(K, CV_32F);
#if ENABLE_LOG
int64 pt = getTickCount();
#endif
// Warp the current image
w->warp(img, K, cameras_[img_idx].R, interp_flags_, BORDER_REFLECT, img_warped);
LOGLN(" warp the current image: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
LOGLN(" warp the current image mask: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
// Compensate exposure
exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);
LOGLN(" compensate exposure: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
img_warped.convertTo(img_warped_s, CV_16S);
img_warped.release();
img.release();
mask.release();
// Make sure seam mask has proper size
dilate(masks_warped[img_idx], dilated_mask, Mat());
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
bitwise_and(seam_mask, mask_warped, mask_warped);
LOGLN(" other: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
pt = getTickCount();
#endif
if (!is_blender_prepared)
{
blender_->prepare(corners, sizes);
is_blender_prepared = true;
}
LOGLN(" other2: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
LOGLN(" feed...");
#if ENABLE_LOG
int64 feed_t = getTickCount();
#endif
// Blend the current image
blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
LOGLN(" feed time: " << ((getTickCount() - feed_t) / getTickFrequency()) << " sec");
LOGLN("Compositing ## time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
}
#if ENABLE_LOG
int64 blend_t = getTickCount();
#endif
UMat result;
blender_->blend(result, result_mask_);
LOGLN("blend time: " << ((getTickCount() - blend_t) / getTickFrequency()) << " sec");
LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
// Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,
// so convert it to CV_8U to avoid confusing the user
result.convertTo(pano, CV_8U);
return OK;
}
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
{
return stitch(images, noArray(), pano);
}
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano)
{
CV_INSTRUMENT_REGION();
Status status = estimateTransform(images, masks);
if (status != OK)
return status;
return composePanorama(pano);
}
Stitcher::Status Stitcher::matchImages()
{
if ((int)imgs_.size() < 2)
{
LOGLN("Need more images");
return ERR_NEED_MORE_IMGS;
}
work_scale_ = 1;
seam_work_aspect_ = 1;
seam_scale_ = 1;
bool is_work_scale_set = false;
bool is_seam_scale_set = false;
features_.resize(imgs_.size());
seam_est_imgs_.resize(imgs_.size());
full_img_sizes_.resize(imgs_.size());
LOGLN("Finding features...");
#if ENABLE_LOG
int64 t = getTickCount();
#endif
std::vector<UMat> feature_find_imgs(imgs_.size());
std::vector<UMat> feature_find_masks(masks_.size());
for (size_t i = 0; i < imgs_.size(); ++i)
{
full_img_sizes_[i] = imgs_[i].size();
if (registr_resol_ < 0)
{
feature_find_imgs[i] = imgs_[i];
work_scale_ = 1;
is_work_scale_set = true;
}
else
{
if (!is_work_scale_set)
{
work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
is_work_scale_set = true;
}
resize(imgs_[i], feature_find_imgs[i], Size(), work_scale_, work_scale_, INTER_LINEAR_EXACT);
}
if (!is_seam_scale_set)
{
seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
seam_work_aspect_ = seam_scale_ / work_scale_;
is_seam_scale_set = true;
}
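// Scale example (illustrative): with the default registr_resol_ of 0.6 Mpx
// and a 4000x3000 (12 Mpx) input, work_scale_ = sqrt(0.6e6 / 12e6) ~= 0.22,
// so features are found on a ~894x671 copy; the default seam_est_resol_ of
// 0.1 Mpx shrinks the seam-estimation copy further, and seam_work_aspect_
// records the ratio between the two scales.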
if (!masks_.empty())
{
resize(masks_[i], feature_find_masks[i], Size(), work_scale_, work_scale_, INTER_NEAREST);
}
features_[i].img_idx = (int)i;
LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());
resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
}
// find features possibly in parallel
detail::computeImageFeatures(features_finder_, feature_find_imgs, features_, feature_find_masks);
// Release the temporaries to save memory
feature_find_imgs.clear();
feature_find_masks.clear();
LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
LOG("Pairwise matching");
#if ENABLE_LOG
t = getTickCount();
#endif
(*features_matcher_)(features_, pairwise_matches_, matching_mask_);
features_matcher_->collectGarbage();
LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
// Leave only images we are sure are from the same panorama
indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);
std::vector<UMat> seam_est_imgs_subset;
std::vector<UMat> imgs_subset;
std::vector<Size> full_img_sizes_subset;
for (size_t i = 0; i < indices_.size(); ++i)
{
imgs_subset.push_back(imgs_[indices_[i]]);
seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
}
seam_est_imgs_ = seam_est_imgs_subset;
imgs_ = imgs_subset;
full_img_sizes_ = full_img_sizes_subset;
if ((int)imgs_.size() < 2)
{
LOGLN("Need more images");
return ERR_NEED_MORE_IMGS;
}
return OK;
}
Stitcher::Status Stitcher::estimateCameraParams()
{
// estimate homography in global frame
if (!(*estimator_)(features_, pairwise_matches_, cameras_))
return ERR_HOMOGRAPHY_EST_FAIL;
for (size_t i = 0; i < cameras_.size(); ++i)
{
Mat R;
cameras_[i].R.convertTo(R, CV_32F);
cameras_[i].R = R;
//LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());
}
bundle_adjuster_->setConfThresh(conf_thresh_);
if (!(*bundle_adjuster_)(features_, pairwise_matches_, cameras_))
return ERR_CAMERA_PARAMS_ADJUST_FAIL;
// Find median focal length and use it as final image scale
std::vector<double> focals;
for (size_t i = 0; i < cameras_.size(); ++i)
{
//LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());
focals.push_back(cameras_[i].focal);
}
std::sort(focals.begin(), focals.end());
if (focals.size() % 2 == 1)
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
else
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
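// Example: sorted focals {800, 820, 850, 9000} give the even-count median
// (820 + 850) / 2 = 835, so a single outlier focal cannot distort the warp
// scale.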
if (do_wave_correct_)
{
std::vector<Mat> rmats;
for (size_t i = 0; i < cameras_.size(); ++i)
rmats.push_back(cameras_[i].R.clone());
detail::waveCorrect(rmats, wave_correct_kind_);
for (size_t i = 0; i < cameras_.size(); ++i)
cameras_[i].R = rmats[i];
}
return OK;
}
Stitcher::Status Stitcher::setTransform(InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras)
{
std::vector<int> component;
for (int i = 0; i < (int)images.total(); i++)
component.push_back(i);
return setTransform(images, cameras, component);
}
Stitcher::Status Stitcher::setTransform(
InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras, const std::vector<int> &component)
{
// CV_Assert(images.size() == cameras.size());
images.getUMatVector(imgs_);
masks_.clear();
if ((int)imgs_.size() < 2)
{
LOGLN("Need more images");
return ERR_NEED_MORE_IMGS;
}
work_scale_ = 1;
seam_work_aspect_ = 1;
seam_scale_ = 1;
bool is_work_scale_set = false;
bool is_seam_scale_set = false;
seam_est_imgs_.resize(imgs_.size());
full_img_sizes_.resize(imgs_.size());
for (size_t i = 0; i < imgs_.size(); ++i)
{
full_img_sizes_[i] = imgs_[i].size();
if (registr_resol_ < 0)
{
work_scale_ = 1;
is_work_scale_set = true;
}
else
{
if (!is_work_scale_set)
{
work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
is_work_scale_set = true;
}
}
if (!is_seam_scale_set)
{
seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
seam_work_aspect_ = seam_scale_ / work_scale_;
is_seam_scale_set = true;
}
resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
}
features_.clear();
pairwise_matches_.clear();
indices_ = component;
std::vector<UMat> seam_est_imgs_subset;
std::vector<UMat> imgs_subset;
std::vector<Size> full_img_sizes_subset;
for (size_t i = 0; i < indices_.size(); ++i)
{
imgs_subset.push_back(imgs_[indices_[i]]);
seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
}
seam_est_imgs_ = seam_est_imgs_subset;
imgs_ = imgs_subset;
full_img_sizes_ = full_img_sizes_subset;
if ((int)imgs_.size() < 2)
{
LOGLN("Need more images");
return ERR_NEED_MORE_IMGS;
}
cameras_ = cameras;
std::vector<double> focals;
for (size_t i = 0; i < cameras.size(); ++i)
focals.push_back(cameras_[i].focal);
std::sort(focals.begin(), focals.end());
if (focals.size() % 2 == 1)
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
else
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
return Status::OK;
}
CV_DEPRECATED Ptr<Stitcher> createStitcher(bool /*ignored*/)
{
CV_INSTRUMENT_REGION();
return Stitcher::create(Stitcher::PANORAMA);
}
CV_DEPRECATED Ptr<Stitcher> createStitcherScans(bool /*ignored*/)
{
CV_INSTRUMENT_REGION();
return Stitcher::create(Stitcher::SCANS);
}
} // namespace cv


@@ -0,0 +1,108 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels_stitching.hpp"
namespace cv {
namespace detail {
Ptr<Timelapser> Timelapser::createDefault(int type)
{
if (type == AS_IS)
return makePtr<Timelapser>();
if (type == CROP)
return makePtr<TimelapserCrop>();
CV_Error(Error::StsBadArg, "unsupported timelapsing method");
}
void Timelapser::initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes)
{
dst_roi_ = resultRoi(corners, sizes);
dst_.create(dst_roi_.size(), CV_16SC3);
}
void Timelapser::process(InputArray _img, InputArray /*_mask*/, Point tl)
{
CV_INSTRUMENT_REGION();
dst_.setTo(Scalar::all(0));
Mat img = _img.getMat();
Mat dst = dst_.getMat(ACCESS_RW);
CV_Assert(img.type() == CV_16SC3);
int dx = tl.x - dst_roi_.x;
int dy = tl.y - dst_roi_.y;
for (int y = 0; y < img.rows; ++y)
{
const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);
for (int x = 0; x < img.cols; ++x)
{
if (test_point(Point(tl.x + x, tl.y + y)))
{
Point3_<short> *dst_row = dst.ptr<Point3_<short> >(dy + y);
dst_row[dx + x] = src_row[x];
}
}
}
}
bool Timelapser::test_point(Point pt)
{
return dst_roi_.contains(pt);
}
void TimelapserCrop::initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes)
{
dst_roi_ = resultRoiIntersection(corners, sizes);
dst_.create(dst_roi_.size(), CV_16SC3);
}
} // namespace detail
} // namespace cv
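
Editor's note: a minimal usage sketch for the Timelapser above, not part of the
original source; the input file name is hypothetical and the frame is assumed to
be already warped into the composite frame of reference:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching/detail/timelapsers.hpp>
#include <vector>

int main()
{
    cv::Mat frame = cv::imread("warped_frame.jpg");   // hypothetical pre-warped frame
    cv::Mat frame_s;
    frame.convertTo(frame_s, CV_16S);                 // process() asserts CV_16SC3 input
    std::vector<cv::Point> corners = { cv::Point(0, 0) };
    std::vector<cv::Size>  sizes   = { frame.size() };
    cv::Ptr<cv::detail::Timelapser> t =
        cv::detail::Timelapser::createDefault(cv::detail::Timelapser::AS_IS);
    t->initialize(corners, sizes);                    // allocates the composite canvas
    t->process(frame_s, cv::Mat(), corners[0]);       // the mask argument is ignored
    cv::Mat out;
    t->getDst().getMat(cv::ACCESS_READ).convertTo(out, CV_8U);
    cv::imwrite("timelapse_0000.jpg", out);
    return 0;
}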


@@ -0,0 +1,188 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv {
namespace detail {
void DisjointSets::createOneElemSets(int n)
{
rank_.assign(n, 0);
size.assign(n, 1);
parent.resize(n);
for (int i = 0; i < n; ++i)
parent[i] = i;
}
int DisjointSets::findSetByElem(int elem)
{
int set = elem;
while (set != parent[set])
set = parent[set];
int next;
while (elem != parent[elem])
{
next = parent[elem];
parent[elem] = set;
elem = next;
}
return set;
}
int DisjointSets::mergeSets(int set1, int set2)
{
if (rank_[set1] < rank_[set2])
{
parent[set1] = set2;
size[set2] += size[set1];
return set2;
}
if (rank_[set2] < rank_[set1])
{
parent[set2] = set1;
size[set1] += size[set2];
return set1;
}
parent[set1] = set2;
rank_[set2]++;
size[set2] += size[set1];
return set2;
}
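
// Annotation (editor's note, not part of the original source): DisjointSets is
// textbook union-find with path compression in findSetByElem() and union by rank
// in mergeSets(). Usage sketch:
//
//     DisjointSets ds;
//     ds.createOneElemSets(4);                     // {0} {1} {2} {3}
//     int s = ds.mergeSets(ds.findSetByElem(0), ds.findSetByElem(2));
//     bool same = ds.findSetByElem(0) == ds.findSetByElem(2);   // true
//     int members = ds.size[s];                    // 2
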
void Graph::addEdge(int from, int to, float weight)
{
edges_[from].push_back(GraphEdge(from, to, weight));
}
bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi)
{
int x_tl = std::max(tl1.x, tl2.x);
int y_tl = std::max(tl1.y, tl2.y);
int x_br = std::min(tl1.x + sz1.width, tl2.x + sz2.width);
int y_br = std::min(tl1.y + sz1.height, tl2.y + sz2.height);
if (x_tl < x_br && y_tl < y_br)
{
roi = Rect(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
return true;
}
return false;
}
Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images)
{
std::vector<Size> sizes(images.size());
for (size_t i = 0; i < images.size(); ++i)
sizes[i] = images[i].size();
return resultRoi(corners, sizes);
}
Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes)
{
CV_Assert(sizes.size() == corners.size());
Point tl(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
Point br(std::numeric_limits<int>::min(), std::numeric_limits<int>::min());
for (size_t i = 0; i < corners.size(); ++i)
{
tl.x = std::min(tl.x, corners[i].x);
tl.y = std::min(tl.y, corners[i].y);
br.x = std::max(br.x, corners[i].x + sizes[i].width);
br.y = std::max(br.y, corners[i].y + sizes[i].height);
}
return Rect(tl, br);
}
Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes)
{
CV_Assert(sizes.size() == corners.size());
Point tl(std::numeric_limits<int>::min(), std::numeric_limits<int>::min());
Point br(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
for (size_t i = 0; i < corners.size(); ++i)
{
tl.x = std::max(tl.x, corners[i].x);
tl.y = std::max(tl.y, corners[i].y);
br.x = std::min(br.x, corners[i].x + sizes[i].width);
br.y = std::min(br.y, corners[i].y + sizes[i].height);
}
return Rect(tl, br);
}
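
// Annotation (editor's note, not part of the original source): resultRoi() returns
// the bounding box of all placed images (their union), resultRoiIntersection() the
// area covered by every image. For example, two 100x100 tiles at (0,0) and (50,0)
// give a union of Rect(0,0,150,100) and an intersection of Rect(50,0,50,100).
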
Point resultTl(const std::vector<Point> &corners)
{
Point tl(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
for (size_t i = 0; i < corners.size(); ++i)
{
tl.x = std::min(tl.x, corners[i].x);
tl.y = std::min(tl.y, corners[i].y);
}
return tl;
}
void selectRandomSubset(int count, int size, std::vector<int> &subset)
{
subset.clear();
for (int i = 0; i < size; ++i)
{
if (randu<int>() % (size - i) < count)
{
subset.push_back(i);
count--;
}
}
}
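
// Annotation (editor's note, not part of the original source): selectRandomSubset()
// is one-pass selection sampling (Knuth's Algorithm S): element i is kept with
// probability count/(size - i), where count is the number of picks still needed,
// so each subset of the requested size is equally likely and the output stays
// sorted.
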
int& stitchingLogLevel()
{
static int _log_level = 1;
return _log_level;
}
} // namespace detail
} // namespace cv


@@ -0,0 +1,58 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_STITCHING_UTIL_LOG_HPP__
#define __OPENCV_STITCHING_UTIL_LOG_HPP__
#ifndef ENABLE_LOG
#define ENABLE_LOG 0
#endif
// TODO remove LOG macros, add logging class
#if ENABLE_LOG
#ifdef __ANDROID__
#include <iostream>
#include <sstream>
#include <android/log.h>
#define LOG_STITCHING_MSG(msg) \
do { \
std::stringstream _os; \
_os << msg; \
__android_log_print(ANDROID_LOG_DEBUG, "STITCHING", "%s", _os.str().c_str()); \
} while(0);
#else
#include <iostream>
#define LOG_STITCHING_MSG(msg) for(;;) { std::cout << msg; std::cout.flush(); break; }
#endif
#else
#define LOG_STITCHING_MSG(msg)
#endif
#define LOG_(_level, _msg) \
for(;;) \
{ \
using namespace std; \
if ((_level) >= ::cv::detail::stitchingLogLevel()) \
{ \
LOG_STITCHING_MSG(_msg); \
} \
break; \
}
#define LOG(msg) LOG_(1, msg)
#define LOG_CHAT(msg) LOG_(0, msg)
#define LOGLN(msg) LOG(msg << std::endl)
#define LOGLN_CHAT(msg) LOG_CHAT(msg << std::endl)
//#if DEBUG_LOG_CHAT
// #define LOG_CHAT(msg) LOG(msg)
// #define LOGLN_CHAT(msg) LOGLN(msg)
//#else
// #define LOG_CHAT(msg) do{}while(0)
// #define LOGLN_CHAT(msg) do{}while(0)
//#endif
#endif // __OPENCV_STITCHING_UTIL_LOG_HPP__
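
Editor's note: a sketch of how these macros are driven, not part of the original
source. ENABLE_LOG must be defined before the header is included, and
cv::detail::stitchingLogLevel() is defined in util.cpp; the forward declaration
below stands in for whatever header normally provides it:

#define ENABLE_LOG 1                  // must precede the header inclusion
#include "util_log.hpp"               // the private src/ header shown above

namespace cv { namespace detail { int& stitchingLogLevel(); } }  // from util.cpp

int main()
{
    LOGLN("visible at the default log level (1)");
    cv::detail::stitchingLogLevel() = 0;      // also admit the *_CHAT level
    LOGLN_CHAT("visible now that level-0 messages pass the threshold");
    return 0;
}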


@@ -0,0 +1,561 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels_stitching.hpp"
#include <iostream>
namespace cv {
PyRotationWarper::PyRotationWarper(String warp_type, float scale)
{
Ptr<WarperCreator> warper_creator;
if (warp_type == "plane")
warper_creator = makePtr<cv::PlaneWarper>();
else if (warp_type == "affine")
warper_creator = makePtr<cv::AffineWarper>();
else if (warp_type == "cylindrical")
warper_creator = makePtr<cv::CylindricalWarper>();
else if (warp_type == "spherical")
warper_creator = makePtr<cv::SphericalWarper>();
else if (warp_type == "fisheye")
warper_creator = makePtr<cv::FisheyeWarper>();
else if (warp_type == "stereographic")
warper_creator = makePtr<cv::StereographicWarper>();
else if (warp_type == "compressedPlaneA2B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlaneA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
else if (warp_type == "compressedPlanePortraitA2B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "compressedPlanePortraitA1.5B1")
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "paniniA2B1")
warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
else if (warp_type == "paniniA1.5B1")
warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
else if (warp_type == "paniniPortraitA2B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
else if (warp_type == "paniniPortraitA1.5B1")
warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
else if (warp_type == "mercator")
warper_creator = makePtr<cv::MercatorWarper>();
else if (warp_type == "transverseMercator")
warper_creator = makePtr<cv::TransverseMercatorWarper>();
if (warper_creator.get() != nullptr)
{
rw = warper_creator->create(scale);
}
else
CV_Error(Error::StsError, "unknown warper :" + warp_type);
}
Point2f PyRotationWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R)
{
return rw.get()->warpPoint(pt, K, R);
}
#if CV_VERSION_MAJOR != 4
Point2f PyRotationWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
{
return rw.get()->warpPointBackward(pt, K, R);
}
#endif
Rect PyRotationWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
return rw.get()->buildMaps(src_size, K, R, xmap, ymap);
}
Point PyRotationWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst)
{
if (rw.get() == nullptr)
CV_Error(Error::StsError, "Warper is null");
Point p = rw.get()->warp(src, K, R, interp_mode, border_mode, dst);
return p;
}
void PyRotationWarper::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst)
{
return rw.get()->warpBackward(src, K, R, interp_mode, border_mode, dst_size, dst);
}
Rect PyRotationWarper::warpRoi(Size src_size, InputArray K, InputArray R)
{
return rw.get()->warpRoi(src_size, K, R);
}
namespace detail {
void ProjectorBase::setCameraParams(InputArray _K, InputArray _R, InputArray _T)
{
Mat K = _K.getMat(), R = _R.getMat(), T = _T.getMat();
CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);
CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);
CV_Assert((T.size() == Size(1, 3) || T.size() == Size(3, 1)) && T.type() == CV_32F);
Mat_<float> K_(K);
k[0] = K_(0,0); k[1] = K_(0,1); k[2] = K_(0,2);
k[3] = K_(1,0); k[4] = K_(1,1); k[5] = K_(1,2);
k[6] = K_(2,0); k[7] = K_(2,1); k[8] = K_(2,2);
Mat_<float> Rinv = R.t();
rinv[0] = Rinv(0,0); rinv[1] = Rinv(0,1); rinv[2] = Rinv(0,2);
rinv[3] = Rinv(1,0); rinv[4] = Rinv(1,1); rinv[5] = Rinv(1,2);
rinv[6] = Rinv(2,0); rinv[7] = Rinv(2,1); rinv[8] = Rinv(2,2);
Mat_<float> R_Kinv = R * K.inv();
r_kinv[0] = R_Kinv(0,0); r_kinv[1] = R_Kinv(0,1); r_kinv[2] = R_Kinv(0,2);
r_kinv[3] = R_Kinv(1,0); r_kinv[4] = R_Kinv(1,1); r_kinv[5] = R_Kinv(1,2);
r_kinv[6] = R_Kinv(2,0); r_kinv[7] = R_Kinv(2,1); r_kinv[8] = R_Kinv(2,2);
Mat_<float> K_Rinv = K * Rinv;
k_rinv[0] = K_Rinv(0,0); k_rinv[1] = K_Rinv(0,1); k_rinv[2] = K_Rinv(0,2);
k_rinv[3] = K_Rinv(1,0); k_rinv[4] = K_Rinv(1,1); k_rinv[5] = K_Rinv(1,2);
k_rinv[6] = K_Rinv(2,0); k_rinv[7] = K_Rinv(2,1); k_rinv[8] = K_Rinv(2,2);
Mat_<float> T_(T.reshape(0, 3));
t[0] = T_(0,0); t[1] = T_(1,0); t[2] = T_(2,0);
}
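
// Annotation (editor's note, not part of the original source): the cached products
// implement both directions of the pure-rotation camera model. For a pixel
// p = (u, v, 1)^T,
//     forward:  ray = (R * K^-1) * p     -> r_kinv
//     backward: p'  = (K * R^-1) * ray   -> k_rinv
// so every mapForward()/mapBackward() call is one precomputed 3x3 multiply plus
// the projector-specific surface mapping.
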
Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T)
{
projector_.setCameraParams(K, R, T);
Point2f uv;
projector_.mapForward(pt.x, pt.y, uv.x, uv.y);
return uv;
}
Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R)
{
float tz[] = {0.f, 0.f, 0.f};
Mat_<float> T(3, 1, tz);
return warpPoint(pt, K, R, T);
}
Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T)
{
projector_.setCameraParams(K, R, T);
Point2f xy;
projector_.mapBackward(pt.x, pt.y, xy.x, xy.y);
return xy;
}
Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
{
float tz[] = { 0.f, 0.f, 0.f };
Mat_<float> T(3, 1, tz);
return warpPointBackward(pt, K, R, T);
}
Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
return buildMaps(src_size, K, R, Mat::zeros(3, 1, CV_32FC1), xmap, ymap);
}
Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray _xmap, OutputArray _ymap)
{
projector_.setCameraParams(K, R, T);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
_xmap.create(dsize, CV_32FC1);
_ymap.create(dsize, CV_32FC1);
#ifdef HAVE_OPENCL
if (ocl::isOpenCLActivated())
{
ocl::Kernel k("buildWarpPlaneMaps", ocl::stitching::warpers_oclsrc);
if (!k.empty())
{
int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv), t(1, 3, CV_32FC1, projector_.t);
UMat uxmap = _xmap.getUMat(), uymap = _ymap.getUMat(),
uk_rinv = k_rinv.getUMat(ACCESS_READ), ut = t.getUMat(ACCESS_READ);
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
ocl::KernelArg::PtrReadOnly(uk_rinv), ocl::KernelArg::PtrReadOnly(ut),
dst_tl.x, dst_tl.y, 1/projector_.scale, rowsPerWI);
size_t globalsize[2] = { (size_t)dsize.width, ((size_t)dsize.height + rowsPerWI - 1) / rowsPerWI };
if (k.run(2, globalsize, NULL, true))
{
CV_IMPL_ADD(CV_IMPL_OCL);
return Rect(dst_tl, dst_br);
}
}
}
#endif
Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();
float x, y;
for (int v = dst_tl.y; v <= dst_br.y; ++v)
{
for (int u = dst_tl.x; u <= dst_br.x; ++u)
{
projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
}
}
return Rect(dst_tl, dst_br);
}
Point PlaneWarper::warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
OutputArray dst)
{
UMat uxmap, uymap;
Rect dst_roi = buildMaps(src.size(), K, R, T, uxmap, uymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
remap(src, dst, uxmap, uymap, interp_mode, border_mode);
return dst_roi.tl();
}
Point PlaneWarper::warp(InputArray src, InputArray K, InputArray R,
int interp_mode, int border_mode, OutputArray dst)
{
float tz[] = {0.f, 0.f, 0.f};
Mat_<float> T(3, 1, tz);
return warp(src, K, R, T, interp_mode, border_mode, dst);
}
Rect PlaneWarper::warpRoi(Size src_size, InputArray K, InputArray R, InputArray T)
{
projector_.setCameraParams(K, R, T);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));
}
Rect PlaneWarper::warpRoi(Size src_size, InputArray K, InputArray R)
{
float tz[] = {0.f, 0.f, 0.f};
Mat_<float> T(3, 1, tz);
return warpRoi(src_size, K, R, T);
}
void PlaneWarper::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
float tl_uf = std::numeric_limits<float>::max();
float tl_vf = std::numeric_limits<float>::max();
float br_uf = -std::numeric_limits<float>::max();
float br_vf = -std::numeric_limits<float>::max();
float u, v;
projector_.mapForward(0, 0, u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
projector_.mapForward(0, static_cast<float>(src_size.height - 1), u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
projector_.mapForward(static_cast<float>(src_size.width - 1), 0, u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(src_size.height - 1), u, v);
tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
dst_tl.x = static_cast<int>(tl_uf);
dst_tl.y = static_cast<int>(tl_vf);
dst_br.x = static_cast<int>(br_uf);
dst_br.y = static_cast<int>(br_vf);
}
Point2f AffineWarper::warpPoint(const Point2f &pt, InputArray K, InputArray H)
{
Mat R, T;
getRTfromHomogeneous(H, R, T);
return PlaneWarper::warpPoint(pt, K, R, T);
}
Point2f AffineWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray H)
{
Mat R, T;
getRTfromHomogeneous(H, R, T);
return PlaneWarper::warpPointBackward(pt, K, R, T);
}
Rect AffineWarper::buildMaps(Size src_size, InputArray K, InputArray H, OutputArray xmap, OutputArray ymap)
{
Mat R, T;
getRTfromHomogeneous(H, R, T);
return PlaneWarper::buildMaps(src_size, K, R, T, xmap, ymap);
}
Point AffineWarper::warp(InputArray src, InputArray K, InputArray H,
int interp_mode, int border_mode, OutputArray dst)
{
Mat R, T;
getRTfromHomogeneous(H, R, T);
return PlaneWarper::warp(src, K, R, T, interp_mode, border_mode, dst);
}
Rect AffineWarper::warpRoi(Size src_size, InputArray K, InputArray H)
{
Mat R, T;
getRTfromHomogeneous(H, R, T);
return PlaneWarper::warpRoi(src_size, K, R, T);
}
void AffineWarper::getRTfromHomogeneous(InputArray H_, Mat &R, Mat &T)
{
Mat H = H_.getMat();
CV_Assert(H.size() == Size(3, 3) && H.type() == CV_32F);
T = Mat::zeros(3, 1, CV_32F);
R = H.clone();
T.at<float>(0,0) = R.at<float>(0,2);
T.at<float>(1,0) = R.at<float>(1,2);
R.at<float>(0,2) = 0.f;
R.at<float>(1,2) = 0.f;
// compensate the transform so that it fits the plane warper's conventions
R = R.t();
T = (R * T) * -1;
}
void SphericalWarper::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
detectResultRoiByBorder(src_size, dst_tl, dst_br);
float tl_uf = static_cast<float>(dst_tl.x);
float tl_vf = static_cast<float>(dst_tl.y);
float br_uf = static_cast<float>(dst_br.x);
float br_vf = static_cast<float>(dst_br.y);
float x = projector_.rinv[1];
float y = projector_.rinv[4];
float z = projector_.rinv[7];
if (y > 0.f)
{
float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
float y_ = projector_.k[4] * y / z + projector_.k[5];
if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
{
tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(CV_PI * projector_.scale));
br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(CV_PI * projector_.scale));
}
}
x = projector_.rinv[1];
y = -projector_.rinv[4];
z = projector_.rinv[7];
if (y > 0.f)
{
float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
float y_ = projector_.k[4] * y / z + projector_.k[5];
if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
{
tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(0));
br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(0));
}
}
dst_tl.x = static_cast<int>(tl_uf);
dst_tl.y = static_cast<int>(tl_vf);
dst_br.x = static_cast<int>(br_uf);
dst_br.y = static_cast<int>(br_vf);
}
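
// Annotation (editor's note, not part of the original source):
// detectResultRoiByBorder() walks only the source image border, which misses the
// sphere's poles whenever they project to the image interior. The two checks above
// take the pole axis direction transformed by the rotation, project it with K, and
// if it lands inside the source image, stretch the ROI to v = CV_PI * scale (one
// pole) or v = 0 (the other).
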
void SphericalPortraitWarper::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
detectResultRoiByBorder(src_size, dst_tl, dst_br);
float tl_uf = static_cast<float>(dst_tl.x);
float tl_vf = static_cast<float>(dst_tl.y);
float br_uf = static_cast<float>(dst_br.x);
float br_vf = static_cast<float>(dst_br.y);
float x = projector_.rinv[0];
float y = projector_.rinv[3];
float z = projector_.rinv[6];
if (y > 0.f)
{
float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
float y_ = projector_.k[4] * y / z + projector_.k[5];
if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
{
tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(CV_PI * projector_.scale));
br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(CV_PI * projector_.scale));
}
}
x = projector_.rinv[0];
y = -projector_.rinv[3];
z = projector_.rinv[6];
if (y > 0.f)
{
float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
float y_ = projector_.k[4] * y / z + projector_.k[5];
if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
{
tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(0));
br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(0));
}
}
dst_tl.x = static_cast<int>(tl_uf);
dst_tl.y = static_cast<int>(tl_vf);
dst_br.x = static_cast<int>(br_uf);
dst_br.y = static_cast<int>(br_vf);
}
/////////////////////////////////////////// SphericalWarper ////////////////////////////////////////
Rect SphericalWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
#ifdef HAVE_OPENCL
if (ocl::isOpenCLActivated())
{
ocl::Kernel k("buildWarpSphericalMaps", ocl::stitching::warpers_oclsrc);
if (!k.empty())
{
int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
xmap.create(dsize, CV_32FC1);
ymap.create(dsize, CV_32FC1);
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, 1/projector_.scale, rowsPerWI);
size_t globalsize[2] = { (size_t)dsize.width, ((size_t)dsize.height + rowsPerWI - 1) / rowsPerWI };
if (k.run(2, globalsize, NULL, true))
{
CV_IMPL_ADD(CV_IMPL_OCL);
return Rect(dst_tl, dst_br);
}
}
}
#endif
return RotationWarperBase<SphericalProjector>::buildMaps(src_size, K, R, xmap, ymap);
}
Point SphericalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
UMat uxmap, uymap;
Rect dst_roi = buildMaps(src.size(), K, R, uxmap, uymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
remap(src, dst, uxmap, uymap, interp_mode, border_mode);
return dst_roi.tl();
}
/////////////////////////////////////////// CylindricalWarper ////////////////////////////////////////
Rect CylindricalWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
#ifdef HAVE_OPENCL
if (ocl::isOpenCLActivated())
{
ocl::Kernel k("buildWarpCylindricalMaps", ocl::stitching::warpers_oclsrc);
if (!k.empty())
{
int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
xmap.create(dsize, CV_32FC1);
ymap.create(dsize, CV_32FC1);
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, 1/projector_.scale,
rowsPerWI);
size_t globalsize[2] = { (size_t)dsize.width, ((size_t)dsize.height + rowsPerWI - 1) / rowsPerWI };
if (k.run(2, globalsize, NULL, true))
{
CV_IMPL_ADD(CV_IMPL_OCL);
return Rect(dst_tl, dst_br);
}
}
}
#endif
return RotationWarperBase<CylindricalProjector>::buildMaps(src_size, K, R, xmap, ymap);
}
Point CylindricalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
UMat uxmap, uymap;
Rect dst_roi = buildMaps(src.size(), K, R, uxmap, uymap);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
remap(src, dst, uxmap, uymap, interp_mode, border_mode);
return dst_roi.tl();
}
} // namespace detail
} // namespace cv
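
Editor's note: the C++ counterpart of the PyRotationWarper wrapper above is the
WarperCreator/RotationWarper pair used directly. A minimal sketch, not part of the
original source; the intrinsics are toy values and the input file name is
hypothetical:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching/warpers.hpp>

int main()
{
    cv::Mat src = cv::imread("input.jpg");           // hypothetical input
    cv::Mat K = cv::Mat::eye(3, 3, CV_32F);          // toy intrinsics (CV_32F required)
    K.at<float>(0, 0) = K.at<float>(1, 1) = 500.f;   // focal length in pixels
    K.at<float>(0, 2) = src.cols / 2.f;
    K.at<float>(1, 2) = src.rows / 2.f;
    cv::Mat R = cv::Mat::eye(3, 3, CV_32F);          // identity rotation
    cv::Ptr<cv::WarperCreator> creator = cv::makePtr<cv::SphericalWarper>();
    cv::Ptr<cv::detail::RotationWarper> warper = creator->create(500.f /*scale*/);
    cv::Mat dst;
    cv::Point tl = warper->warp(src, K, R, cv::INTER_LINEAR, cv::BORDER_REFLECT, dst);
    (void)tl;                                        // dst's top-left in warped coords
    cv::imwrite("warped.jpg", dst);
    return 0;
}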


@@ -0,0 +1,292 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
#ifdef HAVE_CUDA
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], const float t[3], float scale,
cudaStream_t stream);
void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream);
void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
const float k_rinv[9], const float r_kinv[9], float scale,
cudaStream_t stream);
}
}}}
static void buildWarpPlaneMaps(Size src_size, Rect dst_roi, InputArray _K, InputArray _R, InputArray _T,
float scale, OutputArray _map_x, OutputArray _map_y, Stream& stream = Stream::Null())
{
CV_UNUSED(src_size);
Mat K = _K.getMat();
Mat R = _R.getMat();
Mat T = _T.getMat();
CV_Assert( K.size() == Size(3,3) && K.type() == CV_32FC1 );
CV_Assert( R.size() == Size(3,3) && R.type() == CV_32FC1 );
CV_Assert( (T.size() == Size(3,1) || T.size() == Size(1,3)) && T.type() == CV_32FC1 && T.isContinuous() );
Mat K_Rinv = K * R.t();
Mat R_Kinv = R * K.inv();
CV_Assert( K_Rinv.isContinuous() );
CV_Assert( R_Kinv.isContinuous() );
_map_x.create(dst_roi.size(), CV_32FC1);
_map_y.create(dst_roi.size(), CV_32FC1);
GpuMat map_x = _map_x.getGpuMat();
GpuMat map_y = _map_y.getGpuMat();
device::imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
T.ptr<float>(), scale, StreamAccessor::getStream(stream));
}
static void buildWarpSphericalMaps(Size src_size, Rect dst_roi, InputArray _K, InputArray _R, float scale,
OutputArray _map_x, OutputArray _map_y, Stream& stream = Stream::Null())
{
CV_UNUSED(src_size);
Mat K = _K.getMat();
Mat R = _R.getMat();
CV_Assert( K.size() == Size(3,3) && K.type() == CV_32FC1 );
CV_Assert( R.size() == Size(3,3) && R.type() == CV_32FC1 );
Mat K_Rinv = K * R.t();
Mat R_Kinv = R * K.inv();
CV_Assert( K_Rinv.isContinuous() );
CV_Assert( R_Kinv.isContinuous() );
_map_x.create(dst_roi.size(), CV_32FC1);
_map_y.create(dst_roi.size(), CV_32FC1);
GpuMat map_x = _map_x.getGpuMat();
GpuMat map_y = _map_y.getGpuMat();
device::imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(), scale, StreamAccessor::getStream(stream));
}
static void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, InputArray _K, InputArray _R, float scale,
OutputArray _map_x, OutputArray _map_y, Stream& stream = Stream::Null())
{
CV_UNUSED(src_size);
Mat K = _K.getMat();
Mat R = _R.getMat();
CV_Assert( K.size() == Size(3,3) && K.type() == CV_32FC1 );
CV_Assert( R.size() == Size(3,3) && R.type() == CV_32FC1 );
Mat K_Rinv = K * R.t();
Mat R_Kinv = R * K.inv();
CV_Assert( K_Rinv.isContinuous() );
CV_Assert( R_Kinv.isContinuous() );
_map_x.create(dst_roi.size(), CV_32FC1);
_map_y.create(dst_roi.size(), CV_32FC1);
GpuMat map_x = _map_x.getGpuMat();
GpuMat map_y = _map_y.getGpuMat();
device::imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(), scale, StreamAccessor::getStream(stream));
}
#endif
Rect cv::detail::PlaneWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R,
cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
return buildMaps(src_size, K, R, Mat::zeros(3, 1, CV_32F), xmap, ymap);
}
Rect cv::detail::PlaneWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T,
cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
#ifndef HAVE_CUDA
CV_UNUSED(src_size);
CV_UNUSED(K);
CV_UNUSED(R);
CV_UNUSED(T);
CV_UNUSED(xmap);
CV_UNUSED(ymap);
throw_no_cuda();
#else
projector_.setCameraParams(K, R, T);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
::buildWarpPlaneMaps(src_size, Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1)),
K, R, T, projector_.scale, xmap, ymap);
return Rect(dst_tl, dst_br);
#endif
}
Point cv::detail::PlaneWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R,
int interp_mode, int border_mode,
cuda::GpuMat & dst)
{
return warp(src, K, R, Mat::zeros(3, 1, CV_32F), interp_mode, border_mode, dst);
}
Point cv::detail::PlaneWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R, InputArray T,
int interp_mode, int border_mode,
cuda::GpuMat & dst)
{
#ifndef HAVE_OPENCV_CUDAWARPING
CV_UNUSED(src);
CV_UNUSED(K);
CV_UNUSED(R);
CV_UNUSED(T);
CV_UNUSED(interp_mode);
CV_UNUSED(border_mode);
CV_UNUSED(dst);
throw_no_cuda();
#else
Rect dst_roi = buildMaps(src.size(), K, R, T, d_xmap_, d_ymap_);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
cuda::remap(src, dst, d_xmap_, d_ymap_, interp_mode, border_mode);
return dst_roi.tl();
#endif
}
Rect cv::detail::SphericalWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
#ifndef HAVE_CUDA
CV_UNUSED(src_size);
CV_UNUSED(K);
CV_UNUSED(R);
CV_UNUSED(xmap);
CV_UNUSED(ymap);
throw_no_cuda();
#else
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
::buildWarpSphericalMaps(src_size, Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1)),
K, R, projector_.scale, xmap, ymap);
return Rect(dst_tl, dst_br);
#endif
}
Point cv::detail::SphericalWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R,
int interp_mode, int border_mode,
cuda::GpuMat & dst)
{
#ifndef HAVE_OPENCV_CUDAWARPING
CV_UNUSED(src);
CV_UNUSED(K);
CV_UNUSED(R);
CV_UNUSED(interp_mode);
CV_UNUSED(border_mode);
CV_UNUSED(dst);
throw_no_cuda();
#else
Rect dst_roi = buildMaps(src.size(), K, R, d_xmap_, d_ymap_);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
cuda::remap(src, dst, d_xmap_, d_ymap_, interp_mode, border_mode);
return dst_roi.tl();
#endif
}
Rect cv::detail::CylindricalWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R,
cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
#ifndef HAVE_CUDA
CV_UNUSED(src_size);
CV_UNUSED(K);
CV_UNUSED(R);
CV_UNUSED(xmap);
CV_UNUSED(ymap);
throw_no_cuda();
#else
projector_.setCameraParams(K, R);
Point dst_tl, dst_br;
detectResultRoi(src_size, dst_tl, dst_br);
::buildWarpCylindricalMaps(src_size, Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1)),
K, R, projector_.scale, xmap, ymap);
return Rect(dst_tl, dst_br);
#endif
}
Point cv::detail::CylindricalWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R,
int interp_mode, int border_mode,
cuda::GpuMat & dst)
{
#ifndef HAVE_OPENCV_CUDAWARPING
CV_UNUSED(src);
CV_UNUSED(K);
CV_UNUSED(R);
CV_UNUSED(interp_mode);
CV_UNUSED(border_mode);
CV_UNUSED(dst);
throw_no_cuda();
#else
Rect dst_roi = buildMaps(src.size(), K, R, d_xmap_, d_ymap_);
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
cuda::remap(src, dst, d_xmap_, d_ymap_, interp_mode, border_mode);
return dst_roi.tl();
#endif
}
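
Editor's note: every GPU entry point above degenerates to throw_no_cuda() in a
CPU-only build, so callers typically probe for a device first. A sketch, not part
of the original source; it assumes OpenCV was built with the cudawarping module so
that the *Gpu warper classes are declared:

#include <opencv2/core/cuda.hpp>
#include <opencv2/stitching/detail/warpers.hpp>

// Fall back to the CPU plane warper when no CUDA device is available.
static cv::Ptr<cv::detail::RotationWarper> makePlaneWarper(float scale)
{
    if (cv::cuda::getCudaEnabledDeviceCount() > 0)
        return cv::makePtr<cv::detail::PlaneWarperGpu>(scale);
    return cv::makePtr<cv::detail::PlaneWarper>(scale);
}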


@@ -0,0 +1,169 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
#include "opencv2/stitching/warpers.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
struct WarperTestBase :
public Test, public TestUtils
{
Mat src, dst, xmap, ymap;
UMat usrc, udst, uxmap, uymap;
Mat K, R;
void generateTestData()
{
Size size = randomSize(1, MAX_VALUE);
src = randomMat(size, CV_32FC1, -500, 500);
src.copyTo(usrc);
K = Mat::eye(3, 3, CV_32FC1);
float angle = (float)(30.0 * CV_PI / 180.0);
float rotationMatrix[9] = {
(float)cos(angle), (float)sin(angle), 0,
(float)-sin(angle), (float)cos(angle), 0,
0, 0, 1
};
Mat(3, 3, CV_32FC1, rotationMatrix).copyTo(R);
}
void Near(double threshold = 0.)
{
EXPECT_MAT_NEAR(xmap, uxmap, threshold);
EXPECT_MAT_NEAR(ymap, uymap, threshold);
EXPECT_MAT_NEAR(dst, udst, threshold);
}
};
typedef WarperTestBase SphericalWarperTest;
OCL_TEST_F(SphericalWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<SphericalWarper>();
Ptr<detail::RotationWarper> warper = creator->create(2.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1e-4);
}
}
typedef WarperTestBase CylindricalWarperTest;
OCL_TEST_F(CylindricalWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<CylindricalWarper>();
Ptr<detail::RotationWarper> warper = creator->create(2.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1e-4);
}
}
typedef WarperTestBase PlaneWarperTest;
OCL_TEST_F(PlaneWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<PlaneWarper>();
Ptr<detail::RotationWarper> warper = creator->create(2.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1.5e-4);
}
}
typedef WarperTestBase AffineWarperTest;
OCL_TEST_F(AffineWarperTest, Mat)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
Ptr<WarperCreator> creator = makePtr<AffineWarper>();
Ptr<detail::RotationWarper> warper = creator->create(1.0);
OCL_OFF(warper->buildMaps(src.size(), K, R, xmap, ymap));
OCL_ON(warper->buildMaps(usrc.size(), K, R, uxmap, uymap));
OCL_OFF(warper->warp(src, K, R, INTER_LINEAR, BORDER_REPLICATE, dst));
OCL_ON(warper->warp(usrc, K, R, INTER_LINEAR, BORDER_REPLICATE, udst));
Near(1.5e-4);
}
}
} } // namespace opencv_test::ocl
#endif // HAVE_OPENCL
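
Editor's note: each OCL_OFF/OCL_ON pair above runs the same public call twice,
first with OpenCL dispatch disabled (the CPU reference on Mat) and then enabled
(the OpenCL path on UMat); EXPECT_MAT_NEAR then bounds the difference. The toggle
itself is ordinary core API; roughly, as a sketch (the real macros live in
opencv2/ts/ocl_test.hpp):

#include <opencv2/core/ocl.hpp>

void runBothPaths()
{
    cv::ocl::setUseOpenCL(false);   // statements here take the CPU branch
    // ... reference computation on cv::Mat ...
    cv::ocl::setUseOpenCL(true);    // statements here may dispatch to OpenCL
    // ... same computation on cv::UMat, compared against the reference ...
}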


@@ -0,0 +1,79 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(MultiBandBlender, CanBlendTwoImages)
{
Mat image1 = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/baboon.png");
Mat image2 = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/lena.png");
ASSERT_EQ(image1.rows, image2.rows); ASSERT_EQ(image1.cols, image2.cols);
Mat image1s, image2s;
image1.convertTo(image1s, CV_16S);
image2.convertTo(image2s, CV_16S);
Mat mask1(image1s.size(), CV_8U);
mask1(Rect(0, 0, mask1.cols/2, mask1.rows)).setTo(255);
mask1(Rect(mask1.cols/2, 0, mask1.cols - mask1.cols/2, mask1.rows)).setTo(0);
Mat mask2(image2s.size(), CV_8U);
mask2(Rect(0, 0, mask2.cols/2, mask2.rows)).setTo(0);
mask2(Rect(mask2.cols/2, 0, mask2.cols - mask2.cols/2, mask2.rows)).setTo(255);
detail::MultiBandBlender blender(false, 5);
blender.prepare(Rect(0, 0, max(image1s.cols, image2s.cols), max(image1s.rows, image2s.rows)));
blender.feed(image1s, mask1, Point(0,0));
blender.feed(image2s, mask2, Point(0,0));
Mat result_s, result_mask;
blender.blend(result_s, result_mask);
Mat result; result_s.convertTo(result, CV_8U);
Mat expected = imread(string(cvtest::TS::ptr()->get_data_path()) + "stitching/baboon_lena.png");
double psnr = cvtest::PSNR(expected, result);
EXPECT_GE(psnr, 50);
}
}} // namespace


@@ -0,0 +1,93 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/ts/cuda_test.hpp"
namespace opencv_test { namespace {
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
void multiBandBlend(const cv::Mat& im1, const cv::Mat& im2, const cv::Mat& mask1, const cv::Mat& mask2, cv::Mat& result, bool try_cuda)
{
detail::MultiBandBlender blender(try_cuda, 5);
blender.prepare(Rect(0, 0, max(im1.cols, im2.cols), max(im1.rows, im2.rows)));
// If using CUDA, try blending multiple times without calling prepare() in between
for (int i = 0; i < (try_cuda ? 10 : 1); ++i) {
blender.feed(im1, mask1, Point(0, 0));
blender.feed(im2, mask2, Point(0, 0));
Mat result_s, result_mask;
blender.blend(result_s, result_mask);
result_s.convertTo(result, CV_8U);
}
}
TEST(CUDA_MultiBandBlender, Accuracy)
{
Mat image1 = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/baboon.png");
Mat image2 = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/lena.png");
ASSERT_EQ(image1.rows, image2.rows); ASSERT_EQ(image1.cols, image2.cols);
Mat image1s, image2s;
image1.convertTo(image1s, CV_16S);
image2.convertTo(image2s, CV_16S);
Mat mask1(image1s.size(), CV_8U);
mask1(Rect(0, 0, mask1.cols/2, mask1.rows)).setTo(255);
mask1(Rect(mask1.cols/2, 0, mask1.cols - mask1.cols/2, mask1.rows)).setTo(0);
Mat mask2(image2s.size(), CV_8U);
mask2(Rect(0, 0, mask2.cols/2, mask2.rows)).setTo(0);
mask2(Rect(mask2.cols/2, 0, mask2.cols - mask2.cols/2, mask2.rows)).setTo(255);
cv::Mat result;
multiBandBlend(image1s, image2s, mask1, mask2, result, false);
cv::Mat result_cuda;
multiBandBlend(image1s, image2s, mask1, mask2, result_cuda, true);
EXPECT_MAT_NEAR(result, result_cuda, 3);
}
#endif
}} // namespace


@@ -0,0 +1,70 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test {
namespace {
double minPSNR(UMat src1, UMat src2)
{
std::vector<UMat> src1_channels, src2_channels;
split(src1, src1_channels);
split(src2, src2_channels);
double psnr = cvtest::PSNR(src1_channels[0], src2_channels[0]);
psnr = std::min(psnr, cvtest::PSNR(src1_channels[1], src2_channels[1]));
return std::min(psnr, cvtest::PSNR(src1_channels[2], src2_channels[2]));
}
TEST(ExposureCompensate, SimilarityThreshold)
{
UMat source;
imread(cvtest::TS::ptr()->get_data_path() + "stitching/s1.jpg").copyTo(source);
UMat image1 = source.clone();
UMat image2 = source.clone();
// Add a big artifact
image2(Rect(150, 150, 100, 100)).setTo(Scalar(0, 0, 255));
UMat mask(image1.size(), CV_8U);
mask.setTo(255);
detail::BlocksChannelsCompensator compensator;
compensator.setNrGainsFilteringIterations(0); // makes the effect easier to observe
// Feed the compensator; images 1 and 2 are perfectly
// identical except for the red artifact in image 2.
// Apart from that artifact, there is no exposure to compensate.
compensator.setSimilarityThreshold(1);
uchar xff = 255;
compensator.feed(
{{}, {}},
{image1, image2},
{{mask, xff}, {mask, xff}}
);
// Verify that the artifact in image 2 leaks into image 1
// during exposure compensation when no similarity threshold is set
UMat image1_result = image1.clone();
compensator.apply(0, {}, image1_result, mask);
double psnr_no_similarity_mask = minPSNR(image1, image1_result);
EXPECT_LT(psnr_no_similarity_mask, 45);
// Set a stricter similarity threshold and verify that
// the artifact no longer leaks into image 1
compensator.setSimilarityThreshold(0.1);
compensator.feed(
{{}, {}},
{image1, image2},
{{mask, xff}, {mask, xff}}
);
image1_result = image1.clone();
compensator.apply(0, {}, image1_result, mask);
double psnr_similarity_mask = minPSNR(image1, image1_result);
EXPECT_GT(psnr_similarity_mask, 300);
}
} // namespace
} // namespace opencv_test
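
Editor's note: the test above pins down the semantics of setSimilarityThreshold().
With the threshold at 1, every overlapping pixel feeds the gain statistics, so the
red patch in image 2 drags image 1's per-block gains off target (PSNR below 45 dB
against the untouched original); at 0.1, dissimilar pixels are excluded from the
estimate and image 1 comes back essentially unchanged (PSNR above 300 dB).
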


@@ -0,0 +1,10 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_TEST_MAIN(".")


@@ -0,0 +1,117 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
TEST(SurfFeaturesFinder, CanFindInROIs)
{
Ptr<Feature2D> finder = xfeatures2d::SURF::create();
Mat img = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/lena.png");
vector<Rect> rois;
rois.push_back(Rect(0, 0, img.cols / 2, img.rows / 2));
rois.push_back(Rect(img.cols / 2, img.rows / 2, img.cols - img.cols / 2, img.rows - img.rows / 2));
// construct mask
Mat mask = Mat::zeros(img.size(), CV_8U);
for (const Rect &roi : rois)
{
Mat(mask, roi) = 1;
}
detail::ImageFeatures roi_features;
detail::computeImageFeatures(finder, img, roi_features, mask);
int tl_rect_count = 0, br_rect_count = 0, bad_count = 0;
for (const auto &keypoint : roi_features.keypoints)
{
if (rois[0].contains(keypoint.pt))
tl_rect_count++;
else if (rois[1].contains(keypoint.pt))
br_rect_count++;
else
bad_count++;
}
EXPECT_GT(tl_rect_count, 0);
EXPECT_GT(br_rect_count, 0);
EXPECT_EQ(bad_count, 0);
}
#endif // HAVE_OPENCV_XFEATURES2D && OPENCV_ENABLE_NONFREE
TEST(ParallelFeaturesFinder, IsSameWithSerial)
{
Ptr<Feature2D> para_finder = ORB::create();
Ptr<Feature2D> serial_finder = ORB::create();
Mat img = imread(string(cvtest::TS::ptr()->get_data_path()) + "stitching/a3.png", IMREAD_GRAYSCALE);
detail::ImageFeatures serial_features;
detail::computeImageFeatures(serial_finder, img, serial_features);
vector<Mat> imgs(50, img);
vector<detail::ImageFeatures> para_features(imgs.size());
detail::computeImageFeatures(para_finder, imgs, para_features); // FIXIT This call doesn't use parallel_for_()
// results must be the same
Mat serial_descriptors;
serial_features.descriptors.copyTo(serial_descriptors);
for(size_t i = 0; i < para_features.size(); ++i)
{
SCOPED_TRACE(cv::format("i=%zu", i));
EXPECT_EQ(serial_descriptors.size(), para_features[i].descriptors.size());
#if 0 // FIXIT ORB descriptors are not bit-exact (perhaps due to internal parallel_for usage)
ASSERT_EQ(0, cv::norm(serial_descriptors, para_features[i].descriptors, NORM_L1))
<< "serial_size=" << serial_descriptors.size()
<< " par_size=" << para_features[i].descriptors.size()
<< endl << serial_descriptors
<< endl << endl << para_features[i].descriptors.getMat(ACCESS_READ);
#endif
EXPECT_EQ(serial_features.img_size, para_features[i].img_size);
EXPECT_EQ(serial_features.keypoints.size(), para_features[i].keypoints.size());
}
}
}} // namespace
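
Editor's note: detail::computeImageFeatures() comes in two overloads exercised
above, one per image and one over a whole vector (the FIXIT remarks that the
vector form does not actually parallelize yet); the optional mask restricts
detection, which is what the SURF ROI test relies on. A minimal single-image
sketch, not part of the original source; the file name is hypothetical:

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching/detail/matchers.hpp>

int main()
{
    cv::Ptr<cv::Feature2D> orb = cv::ORB::create();
    cv::Mat img = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);  // hypothetical input
    cv::detail::ImageFeatures feats;
    cv::detail::computeImageFeatures(orb, img, feats);  // keypoints, descriptors, img_size
    return feats.keypoints.empty() ? 1 : 0;             // non-zero exit if nothing found
}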


@@ -0,0 +1,18 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/stitching.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
#endif


@@ -0,0 +1,131 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/stitching/warpers.hpp"
namespace opencv_test { namespace {
class ReprojectionTest : public ::testing::Test {
protected:
const size_t TEST_COUNT = 15;
Mat K, R;
RNG rng = RNG(0);
ReprojectionTest()
{
K = Mat::eye(3, 3, CV_32FC1);
float angle = (float)(30.0 * CV_PI / 180.0);
float rotationMatrix[9] = {
(float)cos(angle), (float)sin(angle), 0,
(float)-sin(angle), (float)cos(angle), 0,
0, 0, 1
};
Mat(3, 3, CV_32FC1, rotationMatrix).copyTo(R);
}
void TestReprojection(Ptr<detail::RotationWarper> warper, Point2f pt) {
Point2f projected_pt = warper->warpPoint(pt, K, R);
Point2f reprojected_pt = warper->warpPointBackward(projected_pt, K, R);
EXPECT_NEAR(pt.x, reprojected_pt.x, 1e-5f);
EXPECT_NEAR(pt.y, reprojected_pt.y, 1e-5f);
}
};
TEST_F(ReprojectionTest, PlaneWarper)
{
    Ptr<WarperCreator> creator = makePtr<PlaneWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, AffineWarper)
{
    Ptr<WarperCreator> creator = makePtr<AffineWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, CylindricalWarper)
{
    Ptr<WarperCreator> creator = makePtr<CylindricalWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, SphericalWarper)
{
    Ptr<WarperCreator> creator = makePtr<SphericalWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, FisheyeWarper)
{
    Ptr<WarperCreator> creator = makePtr<FisheyeWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, StereographicWarper)
{
    Ptr<WarperCreator> creator = makePtr<StereographicWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, CompressedRectilinearWarper)
{
    Ptr<WarperCreator> creator = makePtr<CompressedRectilinearWarper>(1.5f, 1.0f);
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, CompressedRectilinearPortraitWarper)
{
    Ptr<WarperCreator> creator = makePtr<CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, PaniniWarper)
{
    Ptr<WarperCreator> creator = makePtr<PaniniWarper>(1.5f, 1.0f);
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, PaniniPortraitWarper)
{
    Ptr<WarperCreator> creator = makePtr<PaniniPortraitWarper>(1.5f, 1.0f);
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, MercatorWarper)
{
    Ptr<WarperCreator> creator = makePtr<MercatorWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}

TEST_F(ReprojectionTest, TransverseMercatorWarper)
{
    Ptr<WarperCreator> creator = makePtr<TransverseMercatorWarper>();
    for (size_t i = 0; i < TEST_COUNT; ++i) {
        TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f)));
    }
}
}} // namespace
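
(Editor's note: a minimal sketch of the forward/backward round trip the fixture asserts, runnable outside GTest; the identity intrinsics and rotation here are illustrative values, not taken from the tests.)

// Standalone sketch of the warpPoint/warpPointBackward round trip.
#include <cstdio>
#include "opencv2/core.hpp"
#include "opencv2/stitching/warpers.hpp"

int main()
{
    cv::Mat K = cv::Mat::eye(3, 3, CV_32FC1);   // illustrative intrinsics
    cv::Mat R = cv::Mat::eye(3, 3, CV_32FC1);   // no rotation
    cv::Ptr<cv::detail::RotationWarper> warper = cv::SphericalWarper().create(1.f);

    cv::Point2f pt(0.25f, -0.5f);
    cv::Point2f fwd = warper->warpPoint(pt, K, R);          // image plane -> warped surface
    cv::Point2f back = warper->warpPointBackward(fwd, K, R); // warped surface -> image plane
    std::printf("(%f, %f) -> (%f, %f) -> (%f, %f)\n", pt.x, pt.y, fwd.x, fwd.y, back.x, back.y);
    return 0;
}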

View File

@@ -0,0 +1,28 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(ImageStitcher, setTransform)
{
    vector<Mat> images;
    images.push_back(imread(string(cvtest::TS::ptr()->get_data_path()) + "stitching/s1.jpg"));
    images.push_back(imread(string(cvtest::TS::ptr()->get_data_path()) + "stitching/s2.jpg"));

    // Reference pipeline: estimate the transforms, then compose
    Mat expected;
    Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
    EXPECT_TRUE(Stitcher::OK == stitcher->estimateTransform(images));
    EXPECT_TRUE(Stitcher::OK == stitcher->composePanorama(expected));

    // A second stitcher fed the already-estimated cameras must produce an identical panorama
    Mat result;
    Ptr<Stitcher> another_stitcher = Stitcher::create(Stitcher::PANORAMA);
    EXPECT_TRUE(Stitcher::OK == another_stitcher->setTransform(images, stitcher->cameras()));
    EXPECT_TRUE(Stitcher::OK == another_stitcher->composePanorama(result));

    EXPECT_DOUBLE_EQ(cvtest::norm(expected, result, NORM_INF), 0.0);
}
}} // namespace opencv_test
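
(Editor's note: the pattern this test validates — estimate once, then reuse the camera parameters — is what makes repeated composition cheap. A hedged sketch follows, assuming the images are already loaded; the helper name is invented for illustration and is not part of the commit.)

// Sketch: reuse previously estimated camera parameters via setTransform().
#include <vector>
#include "opencv2/stitching.hpp"

cv::Mat stitchWithKnownCameras(const std::vector<cv::Mat>& images,
                               const std::vector<cv::detail::CameraParams>& cameras)
{
    cv::Mat pano;
    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    // Skip feature matching and bundle adjustment; go straight to composition.
    if (stitcher->setTransform(images, cameras) == cv::Stitcher::OK)
        stitcher->composePanorama(pano);
    return pano;
}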

View File

@@ -0,0 +1,50 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test {
namespace {
detail::WaveCorrectKind correctionKind(const std::vector<UMat>& images)
{
    Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
    stitcher->estimateTransform(images);

    // Collect the estimated camera rotations and let the detector pick a correction kind
    std::vector<Mat> rmats;
    auto cameras = stitcher->cameras();
    for (const auto& camera : cameras)
        rmats.push_back(camera.R);
    return detail::autoDetectWaveCorrectKind(rmats);
}

TEST(WaveCorrection, AutoWaveCorrection)
{
    std::vector<UMat> images(2);
    imread(cvtest::TS::ptr()->get_data_path() + "stitching/s1.jpg").copyTo(images[0]);
    imread(cvtest::TS::ptr()->get_data_path() + "stitching/s2.jpg").copyTo(images[1]);
    EXPECT_EQ(detail::WAVE_CORRECT_HORIZ, correctionKind(images));

    // Rotating the inputs by 90 degrees must flip the detected kind to vertical
    std::vector<UMat> rotated_images(2);
    rotate(images[0], rotated_images[0], cv::ROTATE_90_CLOCKWISE);
    rotate(images[1], rotated_images[1], cv::ROTATE_90_CLOCKWISE);
    EXPECT_EQ(detail::WAVE_CORRECT_VERT, correctionKind(rotated_images));

    rotate(images[0], rotated_images[0], cv::ROTATE_90_COUNTERCLOCKWISE);
    rotate(images[1], rotated_images[1], cv::ROTATE_90_COUNTERCLOCKWISE);
    EXPECT_EQ(detail::WAVE_CORRECT_VERT, correctionKind(rotated_images));

    // A 180-degree rotation keeps the panorama horizontal
    rotate(images[0], rotated_images[0], cv::ROTATE_180);
    rotate(images[1], rotated_images[1], cv::ROTATE_180);
    EXPECT_EQ(detail::WAVE_CORRECT_HORIZ, correctionKind(rotated_images));
}
} // namespace
} // namespace opencv_test
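
(Editor's note: a sketch of how the detected kind would typically be consumed, pairing autoDetectWaveCorrectKind with detail::waveCorrect; the helper and the in-place update are illustrative, not part of the test.)

// Sketch: auto-detect the wave-correction kind and apply it to camera rotations.
#include <vector>
#include "opencv2/stitching/detail/motion_estimators.hpp"

void applyAutoWaveCorrect(std::vector<cv::detail::CameraParams>& cameras)
{
    std::vector<cv::Mat> rmats;
    for (const auto& cam : cameras)
        rmats.push_back(cam.R.clone());  // clone: waveCorrect modifies the matrices in place

    cv::detail::WaveCorrectKind kind = cv::detail::autoDetectWaveCorrectKind(rmats);
    cv::detail::waveCorrect(rmats, kind);

    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R = rmats[i];         // write the straightened rotations back
}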