Sample Code for Model Evaluation
Sample code is provided for common scenarios such as image classification, image semantic segmentation, and object detection. You can compile your evaluation code based on the sample code.
- Sample Code for Models of the Image Classification Type
- Sample Code for Models of the Image Semantic Segmentation Type
- Sample Code for Models of the Object Detection Type
Sample Code for Models of the Image Classification Type
The training model corresponding to the following sample code is the built-in algorithm ResNet_v1_50 (TensorFlow engine).
- model_url: model directory. After a model version is selected on the GUI, this parameter is automatically added in the background.
- data_url: dataset directory. After a dataset version is selected on the GUI, this parameter is automatically added in the background.
Sample code:
import json
import logging
import os
import sys
import tempfile
import h5py
import numpy as np
from PIL import Image
import moxing as mox
import tensorflow as tf
from deep_moxing.framework.manifest_api.manifest_api import get_sample_list
from deep_moxing.model_analysis.api import analyse, tmp_save
from deep_moxing.model_analysis.common.constant import TMP_FILE_NAME
# Configure logging and the command-line flags consumed by this script.
logging.basicConfig(level=logging.DEBUG)
FLAGS = tf.app.flags.FLAGS
# model_url: model directory; filled in automatically by the platform GUI.
tf.app.flags.DEFINE_string('model_url', '', 'path to saved model')
# data_url: dataset directory; filled in automatically by the platform GUI.
tf.app.flags.DEFINE_string('data_url', '', 'path to output files')
# Adversarial-robustness parameters (JSON string) passed through to the
# analysis backend; defaults to an i-FGSM attack configuration.
tf.app.flags.DEFINE_string('adv_param_json',
                           '{"attack_method":"i-FGSM","eps":30, "iter_times":4}',
                           'params for attacks')
# Parse only the flags defined above, ignoring any extra argv entries.
FLAGS(sys.argv, known_only=True)
def _preprocess(data_path):
    """Load the image at *data_path* as a float32 RGB batch.

    Returns an array of shape (1, H, W, 3) suitable for feeding a
    batch-of-one into the model.
    """
    rgb = Image.open(data_path).convert('RGB')
    arr = np.asarray(rgb, dtype=np.float32)
    return np.expand_dims(arr, axis=0)
def softmax(x):
    """Return the softmax of *x*.

    For a 2-D (or higher) input the softmax is taken along axis 1, row by
    row; for a 1-D input it is taken over the whole vector. The maximum is
    subtracted before exponentiation for numerical stability, which does
    not change the result.

    Args:
        x: array-like of logits.

    Returns:
        np.ndarray of the same shape as *x*, with each softmax slice
        summing to 1.
    """
    x = np.array(x)
    orig_shape = x.shape
    if x.ndim > 1:
        # Matrix: shift each row by its max, exponentiate, normalize rows.
        shifted = np.exp(x - np.max(x, axis=1, keepdims=True))
        x = shifted / np.sum(shifted, axis=1, keepdims=True)
    else:
        # Vector: same stabilized computation over the single axis.
        shifted = np.exp(x - np.max(x))
        x = shifted / np.sum(shifted)
    assert x.shape == orig_shape
    return x
def get_dataset(data_path, label_map_dict):
    """Collect image paths and numeric labels for evaluation.

    Two dataset layouts are supported:
      * a manifest file (path containing 'manifest'), resolved through
        ``get_sample_list``; unlabeled samples are skipped;
      * a directory whose sub-directory names are the class labels, each
        containing that class's images.

    Args:
        data_path: manifest path or dataset root directory.
        label_map_dict: mapping from label name to numeric class index.

    Returns:
        (img_name_list, label_list): parallel lists of image paths and
        numeric labels. A label name absent from *label_map_dict* yields
        None in label_list (callers validate this).
    """
    label_list = []
    img_name_list = []
    if 'manifest' in data_path:
        manifest, _ = get_sample_list(
            manifest_path=data_path, task_type='image_classification')
        for item in manifest:
            # item = (image_path, [label, ...]); skip unlabeled entries.
            if len(item[1]) != 0:
                label_list.append(label_map_dict.get(item[1][0]))
                img_name_list.append(item[0])
    else:
        # Directory layout: one sub-directory per class label.
        # (The original also built an unused idx->name dict here; removed.)
        for label_name in os.listdir(data_path):
            sub_img_list = os.listdir(os.path.join(data_path, label_name))
            img_name_list += [
                os.path.join(data_path, label_name, img_name)
                for img_name in sub_img_list
            ]
            label_list += [label_map_dict.get(label_name)] * len(sub_img_list)
    return img_name_list, label_list
def deal_ckpt_and_data_with_obs():
    """Stage the model and dataset locally when they live on OBS.

    Reads FLAGS.model_url and FLAGS.data_url; any path starting with
    'obs://' is copied to a local cache directory first.

    Returns:
        (pb_dir, data_path): local model directory and dataset path.
    """
    model_dir = FLAGS.model_url
    dataset_dir = FLAGS.data_url
    if model_dir.startswith('obs://'):
        mox.file.copy_parallel(model_dir, '/cache/ckpt/')
        model_dir = '/cache/ckpt'
        print('------------- download success ------------')
    if dataset_dir.startswith('obs://'):
        mox.file.copy_parallel(dataset_dir, '/cache/data/')
        dataset_dir = '/cache/data/'
        print('------------- download dataset success ------------')
    assert os.path.isdir(model_dir), 'Error, pb_dir must be a directory'
    return model_dir, dataset_dir
def evalution():
    """Run image-classification evaluation against a TF SavedModel.

    Steps: stage model/data locally, load the label index shipped with
    the model ('index' file, HDF5 or JSON), build the dataset, run each
    image through the SavedModel's 'predict_object' signature, and feed
    the softmax predictions to the deep_moxing analyse() API.
    """
    pb_dir, data_path = deal_ckpt_and_data_with_obs()
    # NOTE(review): adv_param_json is read but never used in this sample.
    adv_param_json = FLAGS.adv_param_json
    index_file = os.path.join(pb_dir, 'index')
    # The label index may be an HDF5 file or a JSON file; try h5 first and
    # fall back to JSON on any failure.
    try:
        label_file = h5py.File(index_file, 'r')
        label_array = label_file['labels_list'][:].tolist()
        label_array = [item.decode('utf-8') for item in label_array]
    except Exception as e:
        logging.warning(e)
        logging.warning('index file is not a h5 file, try json.')
        with open(index_file, 'r') as load_f:
            label_file = json.load(load_f)
            label_array = label_file['labels_list'][:]
    # Build name->index and index->name lookup tables.
    label_map_dict = {}
    label_dict = {}
    for idx, item in enumerate(label_array):
        label_map_dict[item] = idx
        label_dict[idx] = item
    print(label_map_dict)
    print(label_dict)
    data_file_list, label_list = get_dataset(data_path, label_map_dict)
    assert len(label_list) > 0, 'missing valid data'
    assert None not in label_list, 'dataset and model not match'
    pred_list = []
    file_name_list = []
    img_list = []
    # Preload every image into memory before opening the session.
    for img_path in data_file_list:
        img = _preprocess(img_path)
        img_list.append(img)
        file_name_list.append(img_path)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = '0'
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        # Resolve input/output tensor names from the SavedModel signature.
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], pb_dir)
        signature = meta_graph_def.signature_def
        signature_key = 'predict_object'
        input_key = 'images'
        output_key = 'logits'
        x_tensor_name = signature[signature_key].inputs[input_key].name
        y_tensor_name = signature[signature_key].outputs[output_key].name
        x = sess.graph.get_tensor_by_name(x_tensor_name)
        y = sess.graph.get_tensor_by_name(y_tensor_name)
        # Inference one image at a time (batch size 1); keep softmaxed
        # probabilities for the analyser.
        for img in img_list:
            pred_output = sess.run([y], {x: img})
            pred_output = softmax(pred_output[0])
            pred_list.append(pred_output[0].tolist())
    # analyse() expects the index->name map serialized as JSON.
    label_dict = json.dumps(label_dict)
    task_type = 'image_classification'
    # analyse
    res = analyse(
        task_type=task_type,
        pred_list=pred_list,
        label_list=label_list,
        name_list=file_name_list,
        label_map_dict=label_dict)


if __name__ == "__main__":
    evalution()
|
Sample Code for Models of the Image Semantic Segmentation Type
The following sample code corresponds to the D-LinkNet road segmentation model (using the TensorFlow engine).
- model_url: model directory. After a model version is selected on the GUI, this parameter is automatically added in the background.
- data_url: dataset directory. After a dataset version is selected on the GUI, this parameter is automatically added in the background.
Sample code:
import glob
import json
import logging
import os
import sys
import numpy as np
from PIL import Image
import moxing as mox
import tensorflow as tf
from deep_moxing.model_analysis.api import analyse
# Configure logging and the command-line flags consumed by this script.
logging.basicConfig(level=logging.DEBUG)
FLAGS = tf.app.flags.FLAGS
# model_url: model directory; filled in automatically by the platform GUI.
tf.app.flags.DEFINE_string('model_url', '', 'path to saved model')
# data_url: dataset directory; filled in automatically by the platform GUI.
tf.app.flags.DEFINE_string('data_url', '', 'path to data files')
# Parse only the flags defined above, ignoring any extra argv entries.
FLAGS(sys.argv, known_only=True)
def _norm(img):
    """Standardize *img* per channel over the spatial axes.

    Subtracts the mean and divides by the standard deviation computed
    over axes (0, 1), i.e. per channel for an (H, W, C) image.

    A zero standard deviation (constant channel) is replaced by 1 so the
    result is 0 for that channel instead of NaN/inf; for all other inputs
    the output is identical to the original implementation.
    """
    mean = np.mean(img, axis=(0, 1), keepdims=True)
    std = np.std(img, axis=(0, 1), keepdims=True)
    # Guard against division by zero on constant channels.
    std = np.where(std == 0, 1.0, std)
    return (img - mean) / std
def _preprocess(data_path):
    """Load, RGB-convert, per-image normalize, and batch an image.

    Returns a float32 array of shape (1, H, W, 3) where each channel has
    been standardized by ``_norm``.
    """
    rgb = Image.open(data_path).convert('RGB')
    arr = _norm(np.asarray(rgb, dtype=np.float32))
    return np.expand_dims(arr, axis=0)
def evalution():
    """Run road-segmentation evaluation against a TF SavedModel.

    Loads the D-LinkNet SavedModel, reads ground-truth masks from
    'eval_label' (PNG) and images from 'eval_uint8' (JPG), runs each
    image through the model's 'segmentation' signature, and feeds the
    raw per-pixel outputs to the deep_moxing analyse() API.
    """
    pb_dir = FLAGS.model_url
    data_path = FLAGS.data_url
    # Stage the dataset locally when it lives on OBS.
    if data_path.startswith('obs://'):
        mox.file.copy_parallel(data_path, '/cache/dataset')
        image_data_path = '/cache/dataset/eval_uint8'
        label_path = '/cache/dataset/eval_label'
    else:
        image_data_path = os.path.join(data_path, 'eval_uint8')
        label_path = os.path.join(data_path, 'eval_label')
    # Stage the model locally when it lives on OBS.
    if pb_dir.startswith('obs://'):
        mox.file.copy_parallel(pb_dir, '/cache/model')
        pb_dir = '/cache/model'
    # Two-class segmentation task: background vs. road.
    label_dict = {'0': 'background', '1': 'road'}
    pred_list = []
    file_name_list = []
    img_list = []
    label_list = []
    # Ground-truth masks: binarize at pixel value 128 (road pixels > 128).
    label_file_list = glob.glob(label_path + '/*.' + 'png')
    label_file_list = sorted(label_file_list)
    for img_path in label_file_list:
        label_img = Image.open(img_path)
        label_img = np.asarray(label_img, dtype=np.uint8)
        label_img = (label_img > 128).astype(np.int8)
        label_list.append(label_img)
    # Input images, sorted so they pair up with the sorted label files
    # (assumes matching file names in both directories).
    data_file_list = glob.glob(image_data_path + '/*.' + 'jpg')
    data_file_list = sorted(data_file_list)
    for img_path in data_file_list:
        img = _preprocess(img_path)
        img_list.append(img)
        file_name_list.append(img_path)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = '0'
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        # Resolve input/output tensor names from the SavedModel signature.
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], pb_dir)
        signature = meta_graph_def.signature_def
        signature_key = 'segmentation'
        input_key = 'images'
        # The output key really is spelled 'logists' in this model export.
        output_key = 'logists'
        x_tensor_name = signature[signature_key].inputs[input_key].name
        y_tensor_name = signature[signature_key].outputs[output_key].name
        x = sess.graph.get_tensor_by_name(x_tensor_name)
        y = sess.graph.get_tensor_by_name(y_tensor_name)
        # Inference one image at a time (batch size 1).
        for idx, img in enumerate(img_list):
            pred_output, = sess.run([y], {x: img})
            pred_output = np.squeeze(pred_output)
            pred_list.append(pred_output.tolist())
            logging.info(file_name_list[idx])
    # analyse() expects the index->name map serialized as JSON.
    label_dict = json.dumps(label_dict)
    task_type = 'image_segmentation'
    # analyse
    res = analyse(
        task_type=task_type,
        pred_list=pred_list,
        label_list=label_list,
        name_list=file_name_list,
        label_map_dict=label_dict,)


if __name__ == "__main__":
    evalution()
|
Sample Code for Models of the Object Detection Type
The training model corresponding to the following example code is the built-in algorithm Faster_RCNN_ResNet_v1_50 (TensorFlow engine).
- model_url: model directory. After a model version is selected on the GUI, this parameter is automatically added in the background.
- data_url: dataset directory. After a dataset version is selected on the GUI, this parameter is automatically added in the background.
Sample code:
import moxing as mox
from deep_moxing.model_analysis.api import analyse
from deep_moxing.framework.manifest_api.manifest_api import get_list
import tensorflow as tf
from PIL import Image
import numpy as np
import xml.etree.ElementTree as ET
import h5py
import os
import json
import logging
import time
import sys
# Configure logging and the command-line flags consumed by this script.
logging.basicConfig(level=logging.DEBUG)
FLAGS = tf.app.flags.FLAGS
# model_url: model directory; filled in automatically by the platform GUI.
tf.app.flags.DEFINE_string('model_url', '', 'path to saved model')
# data_url: dataset directory; filled in automatically by the platform GUI.
tf.app.flags.DEFINE_string('data_url', '', 'path to output files')
# Parse only the flags defined above, ignoring any extra argv entries.
FLAGS(sys.argv, known_only=True)
def _get_label(label_path, label_map_dict):
    """Parse a PASCAL-VOC style XML annotation file.

    Args:
        label_path: path to the XML annotation file.
        label_map_dict: mapping from class name to numeric class index.

    Returns:
        [bbox_list, label_list] where bbox_list holds [ymin, xmin, ymax,
        xmax] integer boxes and label_list the numeric class indices, in
        document order.

    Raises:
        AssertionError: if any object's class name is missing from
            *label_map_dict* (dataset and model do not match).
    """
    root = ET.parse(label_path).getroot()
    bbox_list = []
    label_list = []
    for obj in root.iter('object'):
        xml_box = obj.find('bndbox')
        # Coordinates may be stored as floats; truncate to int.
        xmin = int(float(xml_box.find('xmin').text))
        ymin = int(float(xml_box.find('ymin').text))
        xmax = int(float(xml_box.find('xmax').text))
        ymax = int(float(xml_box.find('ymax').text))
        label_name = obj.find('name').text
        bbox_list.append([ymin, xmin, ymax, xmax])
        label_list.append(label_map_dict.get(label_name))
    # Validate once after the loop; the original re-scanned the whole list
    # on every iteration (accidental quadratic work, same failure mode).
    assert None not in label_list, 'dataset and model not match'
    return [bbox_list, label_list]
def _preprocess(data_path):
    """Return the image at *data_path* as a (1, H, W, 3) float32 array."""
    image = np.asarray(Image.open(data_path).convert('RGB'), dtype=np.float32)
    # Prepend a batch axis for batch-of-one inference.
    return image[None, ...]
def get_data_ckpt_local():
    """Stage model/dataset locally and load images plus VOC annotations.

    Handles three dataset layouts: a manifest file (paths resolved via
    get_list, model copied from OBS), an OBS directory (model and data
    copied to /cache), or a local directory of images with per-image XML
    annotation files.

    Returns:
        (pb_dir, label_list, label_dict, file_name_list, img_list):
        local model dir, per-image [bboxes, labels] pairs, index->name
        map, image paths, and preprocessed image arrays.
    """
    pb_dir = FLAGS.model_url
    data_path = FLAGS.data_url
    data_file_list = []
    label_file_list = []
    if 'manifest' in data_path:
        # Manifest layout: image/annotation paths come from the manifest.
        data_file_list, label_file_list = get_list(manifest_path=data_path)
        print('------------- download ------------')
        mox.file.copy_parallel(pb_dir, '/cache/ckpt/')
        pb_dir = '/cache/ckpt'
        print('------------- download success ------------')
    elif data_path.startswith('obs://'):
        # OBS layout: copy both model and dataset to the local cache.
        print('------------- download ------------')
        mox.file.copy_parallel(pb_dir, '/cache/ckpt/')
        mox.file.copy_parallel(data_path, '/cache/data/')
        pb_dir = '/cache/ckpt'
        data_path = '/cache/data/'
        print('------------- download success ------------')
    if pb_dir:
        assert os.path.isdir(pb_dir), 'Error, pb_dir must be a directory'
    index_file = os.path.join(pb_dir, 'index')
    label_list = []
    file_name_list = []
    img_list = []
    # The label index may be an HDF5 file or a JSON file; try h5 first and
    # fall back to JSON on any failure.
    try:
        label_file = h5py.File(index_file, 'r')
        label_array = label_file['labels_list'][:].tolist()
        label_array = [item.decode('utf-8') for item in label_array]
    except Exception as e:
        logging.warning(e)
        logging.warning('index file is not a h5 file, try json.')
        with open(index_file, 'r') as load_f:
            label_file = json.load(load_f)
            label_array = label_file['labels_list'][:]
    # Build name->index and index->name lookup tables.
    label_map_dict = {}
    label_dict = {}
    for idx, item in enumerate(label_array):
        label_map_dict[item] = idx
        label_dict[idx] = item
    if 'manifest' in data_path:
        # Manifest entries are paired (image, xml) paths.
        for img_path, xml_path in zip(data_file_list, label_file_list):
            label = _get_label(xml_path, label_map_dict)
            img = _preprocess(img_path)
            label_list.append(label)
            img_list.append(img)
            file_name_list.append(img_path)
    else:
        # Directory layout: each image has a same-named .xml next to it.
        file_list = os.listdir(data_path)
        for item in file_list:
            if ('jpg' in item) or ('bmp' in item) or ('png' in item):
                xml_path = os.path.join(data_path, item.split('.')[0] + '.xml')
                img_path = os.path.join(data_path, item)
                label = _get_label(xml_path, label_map_dict)
                img = _preprocess(img_path)
                label_list.append(label)
                img_list.append(img)
                file_name_list.append(img_path)
            else:
                continue
    assert len(label_list) > 0, 'missing valid data'
    return pb_dir, label_list, label_dict, file_name_list, img_list
def evalution():
    """Run object-detection evaluation against a TF SavedModel.

    Loads the Faster R-CNN SavedModel, runs every image through the
    'predict_object' signature (boxes, classes, scores), measures
    throughput as a custom 'fps' metric, and feeds predictions to the
    deep_moxing analyse() API.
    """
    pred_list = []
    pb_dir, label_list, label_dict, file_name_list, img_list = get_data_ckpt_local()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = '0'
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        # Resolve input/output tensor names from the SavedModel signature.
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], pb_dir)
        signature = meta_graph_def.signature_def
        signature_key = 'predict_object'
        input_key = 'images'
        output_key0 = 'detection_boxes'
        output_key1 = 'detection_classes'
        output_key2 = 'detection_scores'
        x_tensor_name = signature[signature_key].inputs[input_key].name
        y_tensor_name0 = signature[signature_key].outputs[output_key0].name
        y_tensor_name1 = signature[signature_key].outputs[output_key1].name
        y_tensor_name2 = signature[signature_key].outputs[output_key2].name
        x = sess.graph.get_tensor_by_name(x_tensor_name)
        y0 = sess.graph.get_tensor_by_name(y_tensor_name0)
        y1 = sess.graph.get_tensor_by_name(y_tensor_name1)
        y2 = sess.graph.get_tensor_by_name(y_tensor_name2)
        # Time the inference loop to report throughput (fps).
        start = time.time()
        for img in img_list:
            pred_detection_boxes, pred_detection_classes, \
            pred_detection_scores = sess.run([y0, y1, y2], {x: img})
            # Drop the leading batch dimension when outputs are batched.
            if pred_detection_boxes.ndim == 3:
                pred_detection_boxes = pred_detection_boxes[0]
                pred_detection_classes = pred_detection_classes[0]
                pred_detection_scores = pred_detection_scores[0]
            # Model classes are 1-based; shift to the 0-based indices the
            # analyser's label map uses.
            pred_list.append([
                pred_detection_boxes.tolist(),
                (pred_detection_classes - 1).tolist(),
                pred_detection_scores.tolist()
            ])
        end = time.time()
    # Report throughput as a custom metric alongside the standard ones.
    fps = len(img_list) / (end - start)
    diy_metric = {'fps': {'value': {'fps': fps}}}
    # analyse() expects the index->name map serialized as JSON.
    label_dict = json.dumps(label_dict)
    task_type = 'image_object_detection'
    # analyse
    res = analyse(
        task_type=task_type,
        pred_list=pred_list,
        label_list=label_list,
        name_list=file_name_list,
        custom_metric=diy_metric,
        label_map_dict=label_dict)


if __name__ == "__main__":
    evalution()
|
Last Article: Model Evaluation API
Next Article: Model Optimization Suggestions
Did this article solve your problem?
Thank you for your score! Your feedback will help us improve the website.