init - initialize the project
@@ -0,0 +1,104 @@
from tensorflow.keras.applications import (
    VGG16, vgg16,
    VGG19, vgg19,

    ResNet50, resnet,
    ResNet101,
    ResNet152,

    DenseNet121, densenet,
    DenseNet169,
    DenseNet201,

    InceptionResNetV2, inception_resnet_v2,
    InceptionV3, inception_v3,

    MobileNet, mobilenet,
    MobileNetV2, mobilenet_v2,

    NASNetLarge, nasnet,
    NASNetMobile,

    Xception, xception
)

from ..tf_model import TFModelPreparer
from ..tf_model import (
    TFModelProcessor,
    TFDnnModelProcessor
)
from ...common.evaluation.classification.cls_data_fetcher import TFPreprocessedFetch
from ...common.test.cls_model_test_pipeline import ClsModelTestPipeline
from ...common.test.configs.default_preprocess_config import (
    tf_input_blob,
    pytorch_input_blob,
    tf_model_blob_caffe_mode
)
from ...common.utils import set_tf_env, create_extended_parser

model_dict = {
    "vgg16": [VGG16, vgg16, tf_model_blob_caffe_mode],
    "vgg19": [VGG19, vgg19, tf_model_blob_caffe_mode],

    "resnet50": [ResNet50, resnet, tf_model_blob_caffe_mode],
    "resnet101": [ResNet101, resnet, tf_model_blob_caffe_mode],
    "resnet152": [ResNet152, resnet, tf_model_blob_caffe_mode],

    "densenet121": [DenseNet121, densenet, pytorch_input_blob],
    "densenet169": [DenseNet169, densenet, pytorch_input_blob],
    "densenet201": [DenseNet201, densenet, pytorch_input_blob],

    "inceptionresnetv2": [InceptionResNetV2, inception_resnet_v2, tf_input_blob],
    "inceptionv3": [InceptionV3, inception_v3, tf_input_blob],

    "mobilenet": [MobileNet, mobilenet, tf_input_blob],
    "mobilenetv2": [MobileNetV2, mobilenet_v2, tf_input_blob],

    "nasnetlarge": [NASNetLarge, nasnet, tf_input_blob],
    "nasnetmobile": [NASNetMobile, nasnet, tf_input_blob],

    "xception": [Xception, xception, tf_input_blob]
}

CNN_CLASS_ID = 0
CNN_UTILS_ID = 1
DEFAULT_BLOB_PARAMS_ID = 2


class TFClsModel(TFModelPreparer):
    def __init__(self, model_name, original_model):
        super(TFClsModel, self).__init__(model_name, original_model)


def main():
    set_tf_env()

    parser = create_extended_parser(list(model_dict.keys()))
    cmd_args = parser.parse_args()

    model_name = cmd_args.model_name
    model_name_val = model_dict[model_name]

    cls_model = TFClsModel(
        model_name=model_name,
        original_model=model_name_val[CNN_CLASS_ID](
            include_top=True,
            weights="imagenet"
        )
    )

    tf_cls_pipeline = ClsModelTestPipeline(
        network_model=cls_model,
        model_processor=TFModelProcessor,
        dnn_model_processor=TFDnnModelProcessor,
        data_fetcher=TFPreprocessedFetch,
        img_processor=model_name_val[CNN_UTILS_ID].preprocess_input,
        cls_args_parser=parser,
        default_input_blob_preproc=model_name_val[DEFAULT_BLOB_PARAMS_ID]
    )

    tf_cls_pipeline.init_test_pipeline()


if __name__ == "__main__":
    main()
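For reference, a minimal sketch of what a single model_dict entry above resolves to (vgg16 shown; the pipeline wiring is omitted and the dummy input is illustrative only):

import numpy as np
from tensorflow.keras.applications import VGG16, vgg16

model_cls, model_utils = VGG16, vgg16               # CNN_CLASS_ID, CNN_UTILS_ID entries
keras_model = model_cls(include_top=True, weights="imagenet")
preprocess = model_utils.preprocess_input           # passed to the pipeline as img_processor

# illustrative forward pass on a random 224x224 RGB image
dummy = preprocess(np.random.randint(0, 256, (1, 224, 224, 3)).astype(np.float32))
print(keras_model(dummy).shape)                     # (1, 1000) ImageNet class scores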
@@ -0,0 +1,142 @@
import os

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import MobileNet
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

from ...common.utils import set_tf_env


def get_tf_model_proto(tf_model):
    # define the directory for .pb model
    pb_model_path = "models"

    # define the name of .pb model
    pb_model_name = "mobilenet.pb"

    # create directory for further converted model
    os.makedirs(pb_model_path, exist_ok=True)

    # get model TF graph
    tf_model_graph = tf.function(lambda x: tf_model(x))

    # get concrete function
    tf_model_graph = tf_model_graph.get_concrete_function(
        tf.TensorSpec(tf_model.inputs[0].shape, tf_model.inputs[0].dtype))

    # obtain frozen concrete function
    frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph)
    # get frozen graph
    frozen_tf_func.graph.as_graph_def()

    # save full TF model
    tf.io.write_graph(graph_or_graph_def=frozen_tf_func.graph,
                      logdir=pb_model_path,
                      name=pb_model_name,
                      as_text=False)

    return os.path.join(pb_model_path, pb_model_name)


def get_preprocessed_img(img_path):
    # read the image
    input_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    input_img = input_img.astype(np.float32)

    # define preprocess parameters
    mean = np.array([1.0, 1.0, 1.0]) * 127.5
    scale = 1 / 127.5

    # prepare input blob to fit the model input:
    # 1. subtract mean
    # 2. scale pixel values to the [-1, 1] range expected by MobileNet
    input_blob = cv2.dnn.blobFromImage(
        image=input_img,
        scalefactor=scale,
        size=(224, 224),  # img target size
        mean=mean,
        swapRB=True,  # BGR -> RGB
        crop=True  # center crop
    )
    print("Input blob shape: {}\n".format(input_blob.shape))

    return input_blob


def get_imagenet_labels(labels_path):
    with open(labels_path) as f:
        imagenet_labels = [line.strip() for line in f.readlines()]
    return imagenet_labels


def get_opencv_dnn_prediction(opencv_net, preproc_img, imagenet_labels):
    # set OpenCV DNN input
    opencv_net.setInput(preproc_img)

    # OpenCV DNN inference
    out = opencv_net.forward()
    print("OpenCV DNN prediction: \n")
    print("* shape: ", out.shape)

    # get the predicted class ID
    imagenet_class_id = np.argmax(out)

    # get confidence
    confidence = out[0][imagenet_class_id]
    print("* class ID: {}, label: {}".format(imagenet_class_id, imagenet_labels[imagenet_class_id]))
    print("* confidence: {:.4f}\n".format(confidence))


def get_tf_dnn_prediction(original_net, preproc_img, imagenet_labels):
    # inference: convert the NCHW blob to the NHWC layout expected by TF
    preproc_img = preproc_img.transpose(0, 2, 3, 1)
    print("TF input blob shape: {}\n".format(preproc_img.shape))

    out = original_net(preproc_img)

    print("\nTensorFlow model prediction: \n")
    print("* shape: ", out.shape)

    # get the predicted class ID
    imagenet_class_id = np.argmax(out)
    print("* class ID: {}, label: {}".format(imagenet_class_id, imagenet_labels[imagenet_class_id]))

    # get confidence
    confidence = out[0][imagenet_class_id]
    print("* confidence: {:.4f}".format(confidence))


def main():
    # configure TF launching
    set_tf_env()

    # initialize TF MobileNet model
    original_tf_model = MobileNet(
        include_top=True,
        weights="imagenet"
    )

    # get TF frozen graph path
    full_pb_path = get_tf_model_proto(original_tf_model)

    # read frozen graph with OpenCV API
    opencv_net = cv2.dnn.readNetFromTensorflow(full_pb_path)
    print("OpenCV model was successfully read. Model layers: \n", opencv_net.getLayerNames())

    # get preprocessed image
    input_img = get_preprocessed_img("../data/squirrel_cls.jpg")

    # get ImageNet labels
    imagenet_labels = get_imagenet_labels("../data/dnn/classification_classes_ILSVRC2012.txt")

    # obtain OpenCV DNN predictions
    get_opencv_dnn_prediction(opencv_net, input_img, imagenet_labels)

    # obtain TF model predictions
    get_tf_dnn_prediction(original_tf_model, input_img, imagenet_labels)


if __name__ == "__main__":
    main()
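The script above prints the predictions of both backends but does not compare them numerically. A minimal sketch of such a check, reusing the helper functions defined above (run in the same module, so cv2, np, and MobileNet are already imported; the tolerances are illustrative assumptions, not values from the original code):

original_tf_model = MobileNet(include_top=True, weights="imagenet")
opencv_net = cv2.dnn.readNetFromTensorflow(get_tf_model_proto(original_tf_model))

blob = get_preprocessed_img("../data/squirrel_cls.jpg")  # same sample image as in main()

opencv_net.setInput(blob)
opencv_out = opencv_net.forward()

tf_out = original_tf_model(blob.transpose(0, 2, 3, 1)).numpy()  # NCHW -> NHWC

# illustrative tolerances; exact agreement across backends is not guaranteed
np.testing.assert_allclose(opencv_out, tf_out, rtol=1e-2, atol=1e-3)
print("Top-1 match:", np.argmax(opencv_out) == np.argmax(tf_out))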
@@ -0,0 +1,45 @@
import os
import tarfile
import urllib.request

DETECTION_MODELS_URL = 'http://download.tensorflow.org/models/object_detection/'


def extract_tf_frozen_graph(model_name, extracted_model_path):
    # define model archive name
    tf_model_tar = model_name + '.tar.gz'
    # define link to retrieve model archive
    model_link = DETECTION_MODELS_URL + tf_model_tar

    tf_frozen_graph_name = 'frozen_inference_graph'

    try:
        urllib.request.urlretrieve(model_link, tf_model_tar)
    except Exception:
        print("TF {} was not retrieved: {}".format(model_name, model_link))
        return

    print("TF {} was retrieved.".format(model_name))

    tf_model_tar = tarfile.open(tf_model_tar)
    frozen_graph_path = ""

    for model_tar_elem in tf_model_tar.getmembers():
        if tf_frozen_graph_name in os.path.basename(model_tar_elem.name):
            tf_model_tar.extract(model_tar_elem, extracted_model_path)
            frozen_graph_path = os.path.join(extracted_model_path, model_tar_elem.name)
            break
    tf_model_tar.close()

    return frozen_graph_path


def main():
    tf_model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
    graph_extraction_dir = "./"
    frozen_graph_path = extract_tf_frozen_graph(tf_model_name, graph_extraction_dir)
    print("Frozen graph path for {}: {}".format(tf_model_name, frozen_graph_path))


if __name__ == "__main__":
    main()
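The extracted frozen graph can then be loaded with the OpenCV DNN module. A minimal sketch (the path below is an assumption about what the default archive unpacks to; extract_tf_frozen_graph() returns the actual location, and running real detections with this model typically also requires a text graph config generated by OpenCV's tf_text_graph_ssd.py):

import cv2

# assumed layout of the extracted ssd_mobilenet_v1_coco_2017_11_17 archive
frozen_graph_path = "./ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb"

net = cv2.dnn.readNetFromTensorflow(frozen_graph_path)
print("Loaded layers:", len(net.getLayerNames()))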
samples/dnn/dnn_model_runner/dnn_conversion/tf/tf_model.py (new file, 112 lines)
@@ -0,0 +1,112 @@
import cv2
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path

CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"


class TFModelPreparer(AbstractModel):
    """ Class for the preparation of the TF models: original and converted OpenCV Net.

    Args:
        model_name: TF model name
        original_model: TF configured model object or session
        is_ready_graph: indicates whether a ready .pb file already exists
        tf_model_graph_path: path to the existing frozen TF graph
    """

    def __init__(
            self,
            model_name="default",
            original_model=None,
            is_ready_graph=False,
            tf_model_graph_path=""
    ):
        self._model_name = model_name
        self._original_model = original_model
        self._model_to_save = ""

        self._is_ready_to_transfer_graph = is_ready_graph
        self.model_path = self._set_model_path(tf_model_graph_path)
        self._dnn_model = self._set_dnn_model()

    def _set_dnn_model(self):
        if not self._is_ready_to_transfer_graph:
            # get model TF graph
            tf_model_graph = tf.function(lambda x: self._original_model(x))

            tf_model_graph = tf_model_graph.get_concrete_function(
                tf.TensorSpec(self._original_model.inputs[0].shape, self._original_model.inputs[0].dtype))

            # obtain frozen concrete function
            frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph)
            frozen_tf_func.graph.as_graph_def()

            # save full TF model
            tf.io.write_graph(graph_or_graph_def=frozen_tf_func.graph,
                              logdir=self.model_path["path"],
                              name=self._model_to_save,
                              as_text=False)

        return cv2.dnn.readNetFromTensorflow(self.model_path["full_path"])

    def _set_model_path(self, tf_pb_file_path):
        """ Method for setting model paths.

        Args:
            tf_pb_file_path: path to the existing TF .pb

        Returns:
            dictionary whose "full_path" key holds the saved model path including its full name.
        """
        model_paths_dict = {
            "path": "",
            "full_path": tf_pb_file_path
        }

        if not self._is_ready_to_transfer_graph:
            self._model_to_save = self._model_name + MODEL_FORMAT
            model_paths_dict = get_full_model_path(CURRENT_LIB.lower(), self._model_to_save)

        return model_paths_dict

    def get_prepared_models(self):
        original_lib_name = CURRENT_LIB + " " + self._model_name
        configured_model_dict = {
            original_lib_name: self._original_model,
            DNN_LIB + " " + self._model_name: self._dnn_model
        }
        return configured_model_dict


class TFModelProcessor(Framework):
    def __init__(self, prepared_model, model_name):
        self._prepared_model = prepared_model
        self._name = model_name

    def get_output(self, input_blob):
        assert len(input_blob.shape) == 4
        # NCHW blob -> NHWC tensor expected by TF
        batch_tf = input_blob.transpose(0, 2, 3, 1)
        out = self._prepared_model(batch_tf)
        return out

    def get_name(self):
        return CURRENT_LIB


class TFDnnModelProcessor(Framework):
    def __init__(self, prepared_dnn_model, model_name):
        self._prepared_dnn_model = prepared_dnn_model
        self._name = model_name

    def get_output(self, input_blob):
        self._prepared_dnn_model.setInput(input_blob)
        ret_val = self._prepared_dnn_model.forward()
        return ret_val

    def get_name(self):
        return DNN_LIB
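A hypothetical usage sketch of the classes above (the import path, the dictionary keys, and the DNN_LIB value are assumptions that depend on the surrounding package):

# hypothetical import; adjust to the actual package layout, e.g.:
# from dnn_model_runner.dnn_conversion.tf.tf_model import TFModelPreparer, TFModelProcessor, TFDnnModelProcessor
import numpy as np
from tensorflow.keras.applications import MobileNet

original_model = MobileNet(include_top=True, weights="imagenet")

# freezes the Keras model to .pb and reads it back as a cv2.dnn net
preparer = TFModelPreparer(model_name="mobilenet", original_model=original_model)
prepared = preparer.get_prepared_models()
print(list(prepared.keys()))  # "<framework> <model name>" entries, e.g. "TF mobilenet"

# both processors consume NCHW blobs; the TF processor transposes to NHWC internally
blob = np.random.rand(1, 3, 224, 224).astype(np.float32)
tf_out = TFModelProcessor(original_model, "mobilenet").get_output(blob)
dnn_out = TFDnnModelProcessor(prepared["DNN mobilenet"], "mobilenet").get_output(blob)  # key assumes DNN_LIB == "DNN"
print(np.asarray(tf_out).shape, dnn_out.shape)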