文档首页 > AI工程师用户指南 > 管理模型 > 评估和诊断模型 > 模型评估代码示例

模型评估代码示例

分享
更新时间:2020/10/15 GMT+08:00

针对图像分类、图像语义分割、物体检测等常用场景,提供代码示例,您可以参考示例编写您的评估代码。

图像分类评估代码示例

以下样例代码对应训练模型为预置算法ResNet_v1_50(TensorFlow引擎)。

  • “model_url”:模型目录,界面上选择模型版本后,后台自动添加此参数,无需手工添加。
  • “data_url”:数据集目录,界面上选择数据集版本后,后台自动添加此参数,无需手工添加。
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
import json
import logging
import os
import sys
import tempfile

import h5py
import numpy as np
from PIL import Image

import moxing as mox
import tensorflow as tf
from deep_moxing.framework.manifest_api.manifest_api import get_sample_list
from deep_moxing.model_analysis.api import analyse, tmp_save, get_advanced_metric_info
from deep_moxing.model_analysis.common.constant import TMP_FILE_NAME

logging.basicConfig(level=logging.DEBUG)

# Command-line flags. "model_url" and "data_url" are injected automatically
# by the platform once a model/dataset version is selected in the UI.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_url', '', 'path to saved model')
# NOTE(review): the help text says "output files" but this flag holds the
# dataset directory — confirm before relying on the description.
tf.app.flags.DEFINE_string('data_url', '', 'path to output files')
# Adversarial-attack parameters as a JSON string; currently read nowhere
# else in this script.
tf.app.flags.DEFINE_string('adv_param_json',
                           '{"attack_method":"i-FGSM","eps":30, "iter_times":4}',
                           'params for attacks')
# Parse only known flags so extra platform-supplied argv entries don't abort.
FLAGS(sys.argv, known_only=True)


def _preprocess(data_path):
    """Read the image at *data_path* as a float32 RGB batch of one.

    Returns an array shaped (1, H, W, 3) suitable for feeding the model.
    """
    rgb = Image.open(data_path).convert('RGB')
    pixels = np.asarray(rgb, dtype=np.float32)
    return pixels[np.newaxis, ...]


def softmax(x):
    """Return the softmax of *x* along its last axis.

    Numerically stabilised by subtracting the per-row maximum before
    exponentiating. Accepts a vector or a batch of logit rows (list or
    ndarray) and returns an ndarray of the same shape whose rows sum to 1.

    Replaces the previous apply_along_axis implementation: same results
    for 1-D and 2-D inputs, simpler and faster via broadcasting.
    """
    x = np.asarray(x)
    # Shift by the row max so exp() cannot overflow; softmax is invariant
    # to a constant shift per row.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)


def get_dataset(data_path, label_map_dict):
    """Collect image paths and their numeric labels from a dataset.

    Two layouts are supported:
      * a manifest file (path contains 'manifest'): pairs come from
        ``get_sample_list``; unlabeled samples are skipped;
      * a directory whose sub-directories are class names, each holding
        that class's images.

    Args:
        data_path: manifest path or dataset root directory.
        label_map_dict: class name -> numeric id mapping from the model.

    Returns:
        (img_name_list, label_list); labels are numeric ids, or None for a
        class name missing from label_map_dict (callers assert on this).

    Fixes: removed the unused ``label_dict``/``idx`` locals and the dead
    ``else: continue`` branch of the original.
    """
    label_list = []
    img_name_list = []
    if 'manifest' in data_path:
        manifest, _ = get_sample_list(
            manifest_path=data_path, task_type='image_classification')
        for item in manifest:
            # item is (file_path, [label_name, ...]); skip unlabeled samples.
            if len(item[1]) != 0:
                label_list.append(label_map_dict.get(item[1][0]))
                img_name_list.append(item[0])
    else:
        for class_name in os.listdir(data_path):
            sub_img_list = os.listdir(os.path.join(data_path, class_name))
            img_name_list += [
                os.path.join(data_path, class_name, img_name)
                for img_name in sub_img_list
            ]
            label_list += [label_map_dict.get(class_name)] * len(sub_img_list)
    return img_name_list, label_list


def deal_ckpt_and_data_with_s3():
    """Localise the model and dataset when their paths point at OBS/S3.

    Reads FLAGS.model_url / FLAGS.data_url, downloads remote content into
    /cache, and returns the tuple (pb_dir, data_path) of local paths.
    """
    pb_dir = FLAGS.model_url
    data_path = FLAGS.data_url

    remote_schemes = ('s3://', 'obs://')
    if pb_dir.startswith(remote_schemes):
        mox.file.copy_parallel(pb_dir, '/cache/ckpt/')
        pb_dir = '/cache/ckpt'
        print('------------- download success ------------')
    if data_path.startswith(remote_schemes):
        mox.file.copy_parallel(data_path, '/cache/data/')
        data_path = '/cache/data/'
        print('------------- download dataset success ------------')
    assert os.path.isdir(pb_dir), 'Error, pb_dir must be a directory'
    return pb_dir, data_path


def evalution():
    """Evaluate an image-classification SavedModel against a dataset.

    Localises the model and data (downloading from OBS/S3 if needed), reads
    the label 'index' file shipped with the model, runs inference on every
    image, converts logits to probabilities, and hands the predictions to
    ``analyse`` to produce the evaluation report.

    Fixes: removed the unused ``adv_param_json`` local and the unused
    ``res`` binding of the original.
    """
    pb_dir, data_path = deal_ckpt_and_data_with_s3()
    # The model directory carries an 'index' file mapping class ids to
    # names; it is either an HDF5 file or a JSON file, both exposing a
    # 'labels_list' entry.
    index_file = os.path.join(pb_dir, 'index')
    try:
        label_file = h5py.File(index_file, 'r')
        label_array = label_file['labels_list'][:].tolist()
        label_array = [item.decode('utf-8') for item in label_array]
    except Exception as e:
        logging.warning(e)
        logging.warning('index file is not a h5 file, try json.')
        with open(index_file, 'r') as load_f:
            label_file = json.load(load_f)
        label_array = label_file['labels_list'][:]
    label_map_dict = {}  # class name -> numeric id
    label_dict = {}      # numeric id -> class name
    for idx, item in enumerate(label_array):
        label_map_dict[item] = idx
        label_dict[idx] = item
    print(label_map_dict)
    print(label_dict)

    data_file_list, label_list = get_dataset(data_path, label_map_dict)

    assert len(label_list) > 0, 'missing valid data'
    assert None not in label_list, 'dataset and model not match'

    # Preprocess every image up front, keeping file names aligned by index.
    pred_list = []
    file_name_list = []
    img_list = []
    for img_path in data_file_list:
        img_list.append(_preprocess(img_path))
        file_name_list.append(img_path)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = '0'
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        # Resolve input/output tensors from the SavedModel serving signature.
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], pb_dir)
        signature = meta_graph_def.signature_def
        signature_key = 'predict_object'
        input_key = 'images'
        output_key = 'logits'
        x_tensor_name = signature[signature_key].inputs[input_key].name
        y_tensor_name = signature[signature_key].outputs[output_key].name
        x = sess.graph.get_tensor_by_name(x_tensor_name)
        y = sess.graph.get_tensor_by_name(y_tensor_name)
        for img in img_list:
            pred_output = sess.run([y], {x: img})
            # Convert raw logits to per-class probabilities.
            pred_output = softmax(pred_output[0])
            pred_list.append(pred_output[0].tolist())

    label_dict = json.dumps(label_dict)
    task_type = 'image_classification'

    # analyse writes the evaluation result for the platform to display.
    analyse(
        task_type=task_type,
        pred_list=pred_list,
        label_list=label_list,
        name_list=file_name_list,
        label_map_dict=label_dict)


# Entry point when the script is launched directly by the evaluation job.
if __name__ == "__main__":
    evalution()

图像语义分割评估代码示例

以下样例代码对应训练模型为D-LinkNet道路分割模型(TensorFlow引擎)。

  • “model_url”:模型目录,界面上选择模型版本后,后台自动添加此参数,无需手工添加。默认为导入模型时选择的最上一级目录。
  • “data_url”:数据集目录,界面上选择数据集版本后,后台自动添加此参数,无需手工添加。
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
import glob
import json
import logging
import os
import sys

import numpy as np
from PIL import Image

import moxing as mox
import tensorflow as tf
from deep_moxing.model_analysis.api import analyse

logging.basicConfig(level=logging.DEBUG)

# Platform-injected flags: model directory and dataset directory.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_url', '', 'path to saved model')
tf.app.flags.DEFINE_string('data_url', '', 'path to data files')
# Parse only known flags so extra platform-supplied argv entries don't abort.
FLAGS(sys.argv, known_only=True)


def _norm(img):
    mean = np.mean(img, axis=(0, 1), keepdims=True)
    std = np.std(img, axis=(0, 1), keepdims=True)
    img = (img - mean) / std
    return img


def _preprocess(data_path):
    """Load, RGB-convert, standardise and batch the image at *data_path*.

    Returns a float32 array shaped (1, H, W, 3).
    """
    image = np.asarray(Image.open(data_path).convert('RGB'), dtype=np.float32)
    image = _norm(image)
    return np.expand_dims(image, axis=0)


def evalution():
    """Evaluate a road-segmentation SavedModel against a labelled dataset.

    Expects the dataset directory to contain 'eval_uint8' (input JPEGs) and
    'eval_label' (binary PNG masks). Downloads model/data into /cache first
    when the configured paths are remote (OBS/S3), then runs inference and
    passes predictions and masks to ``analyse``.
    """
    pb_dir = FLAGS.model_url
    data_path = FLAGS.data_url

    if data_path.startswith('s3://') or data_path.startswith('obs://'):
        mox.file.copy_parallel(data_path, '/cache/dataset')
        image_data_path = '/cache/dataset/eval_uint8'
        label_path = '/cache/dataset/eval_label'
    else:
        image_data_path = os.path.join(data_path, 'eval_uint8')
        label_path = os.path.join(data_path, 'eval_label')
    if pb_dir.startswith('s3://') or pb_dir.startswith('obs://'):
        mox.file.copy_parallel(pb_dir, '/cache/model')
        pb_dir = '/cache/model'

    # Two-class task: id 0 = background, id 1 = road.
    label_dict = {'0': 'background', '1': 'road'}

    pred_list = []
    file_name_list = []
    img_list = []

    # Ground-truth masks: pixels strictly above 128 become class 1 (road).
    label_list = []
    label_file_list = glob.glob(label_path + '/*.' + 'png')
    label_file_list = sorted(label_file_list)
    for img_path in label_file_list:
        label_img = Image.open(img_path)
        label_img = np.asarray(label_img, dtype=np.uint8)
        label_img = (label_img > 128).astype(np.int8)
        label_list.append(label_img)

    # Sorting both lists pairs each image with its mask by position;
    # assumes matching file-name order in the two directories.
    data_file_list = glob.glob(image_data_path + '/*.' + 'jpg')
    data_file_list = sorted(data_file_list)
    for img_path in data_file_list:
        img = _preprocess(img_path)
        img_list.append(img)
        file_name_list.append(img_path)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = '0'
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        # Resolve input/output tensors from the SavedModel serving signature.
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], pb_dir)
        signature = meta_graph_def.signature_def
        signature_key = 'segmentation'
        input_key = 'images'
        # NOTE(review): 'logists' looks like a typo, but it must match the
        # output key used when the model was exported — do not "fix" it.
        output_key = 'logists'
        x_tensor_name = signature[signature_key].inputs[input_key].name
        y_tensor_name = signature[signature_key].outputs[output_key].name
        x = sess.graph.get_tensor_by_name(x_tensor_name)
        y = sess.graph.get_tensor_by_name(y_tensor_name)
        for idx, img in enumerate(img_list):
            pred_output, = sess.run([y], {x: img})
            pred_output = np.squeeze(pred_output)
            pred_list.append(pred_output.tolist())
            logging.info(file_name_list[idx])

    label_dict = json.dumps(label_dict)
    task_type = 'image_segmentation'

    # analyse produces the evaluation report consumed by the platform.
    res = analyse(
        task_type=task_type,
        pred_list=pred_list,
        label_list=label_list,
        name_list=file_name_list,
        label_map_dict=label_dict,)


# Entry point when the script is launched directly by the evaluation job.
if __name__ == "__main__":
    evalution()

物体检测评估代码示例

以下样例代码对应训练模型为预置算法Faster_RCNN_ResNet_v1_50(TensorFlow引擎)。

  • “model_url”:模型目录,界面上选择模型版本后,后台自动添加此参数,无需手工添加。默认为导入模型时选择的最上一级目录。
  • “data_url”:数据集目录,界面上选择数据集版本后,后台自动添加此参数,无需手工添加。
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
import moxing as mox
from deep_moxing.model_analysis.api import analyse
from deep_moxing.framework.manifest_api.manifest_api import get_list
import tensorflow as tf
from PIL import Image
import numpy as np
import xml.etree.ElementTree as ET
import h5py
import os
import json
import logging
import time
import sys

logging.basicConfig(level=logging.DEBUG)

# Platform-injected flags: model directory and dataset directory.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_url', '', 'path to saved model')
# NOTE(review): the help text says "output files" but this flag holds the
# dataset directory — confirm before relying on the description.
tf.app.flags.DEFINE_string('data_url', '', 'path to output files')
# Parse only known flags so extra platform-supplied argv entries don't abort.
FLAGS(sys.argv, known_only=True)


def _get_label(label_path, label_map_dict):
    root = ET.parse(label_path).getroot()
    bbox_list = []
    label_list = []
    for obj in root.iter('object'):
        xml_box = obj.find('bndbox')
        xmin = int(float(xml_box.find('xmin').text))
        ymin = int(float(xml_box.find('ymin').text))
        xmax = int(float(xml_box.find('xmax').text))
        ymax = int(float(xml_box.find('ymax').text))
        label_name = obj.find('name').text
        bbox_list.append([ymin, xmin, ymax, xmax])
        label_list.append(label_map_dict.get(label_name))
    assert None not in label_list, 'dataset and model not match'
    return [bbox_list, label_list]


def _preprocess(data_path):
    """Decode an image file into a float32 RGB tensor shaped (1, H, W, 3)."""
    decoded = Image.open(data_path).convert('RGB')
    batch = np.expand_dims(np.asarray(decoded, dtype=np.float32), axis=0)
    return batch


def get_data_ckpt_local():
    """Localise the model and dataset, then load images and VOC labels.

    Handles three dataset layouts: a manifest file, a remote OBS/S3
    directory, and a plain local directory of image + same-named XML pairs.

    Returns:
        (pb_dir, label_list, label_dict, file_name_list, img_list), where
        each label_list entry is [bbox_list, class_id_list] and label_dict
        maps numeric ids back to class names.
    """
    pb_dir = FLAGS.model_url
    data_path = FLAGS.data_url
    data_file_list = []
    label_file_list = []
    if 'manifest' in data_path:
        # Manifest dataset: the manifest already pairs images with labels,
        # so only the model directory needs downloading.
        data_file_list, label_file_list = get_list(manifest_path=data_path)
        print('------------- download ------------')
        mox.file.copy_parallel(pb_dir, '/cache/ckpt/')
        pb_dir = '/cache/ckpt'
        print('------------- download success ------------')
    elif data_path.startswith('s3://') or data_path.startswith('obs://'):
        # Remote directory dataset: download both model and data.
        print('------------- download ------------')
        mox.file.copy_parallel(pb_dir, '/cache/ckpt/')
        mox.file.copy_parallel(data_path, '/cache/data/')
        pb_dir = '/cache/ckpt'
        data_path = '/cache/data/'
        print('------------- download success ------------')

    if pb_dir:
        assert os.path.isdir(pb_dir), 'Error, pb_dir must be a directory'

    # The model ships an 'index' file (HDF5 or JSON) whose 'labels_list'
    # entry lists class names in id order.
    index_file = os.path.join(pb_dir, 'index')
    label_list = []
    file_name_list = []
    img_list = []
    try:
        label_file = h5py.File(index_file, 'r')
        label_array = label_file['labels_list'][:].tolist()
        label_array = [item.decode('utf-8') for item in label_array]
    except Exception as e:
        logging.warning(e)
        logging.warning('index file is not a h5 file, try json.')
        with open(index_file, 'r') as load_f:
            label_file = json.load(load_f)
        label_array = label_file['labels_list'][:]
    label_map_dict = {}  # class name -> numeric id
    label_dict = {}      # numeric id -> class name
    for idx, item in enumerate(label_array):
        label_map_dict[item] = idx
        label_dict[idx] = item
    if 'manifest' in data_path:
        for img_path, xml_path in zip(data_file_list, label_file_list):
            label = _get_label(xml_path, label_map_dict)
            img = _preprocess(img_path)
            label_list.append(label)
            img_list.append(img)
            file_name_list.append(img_path)
    else:
        # Directory dataset: each image sits next to a same-named .xml file.
        file_list = os.listdir(data_path)
        for item in file_list:
            if ('jpg' in item) or ('bmp' in item) or ('png' in item):
                xml_path = os.path.join(data_path, item.split('.')[0] + '.xml')
                img_path = os.path.join(data_path, item)
                label = _get_label(xml_path, label_map_dict)
                img = _preprocess(img_path)
                label_list.append(label)
                img_list.append(img)
                file_name_list.append(img_path)
            else:
                continue
    assert len(label_list) > 0, 'missing valid data'
    return pb_dir, label_list, label_dict, file_name_list, img_list


def evalution():
    """Evaluate an object-detection SavedModel (Faster R-CNN) on a dataset.

    Runs inference over every image, collecting boxes / class ids / scores,
    measures raw throughput (fps), and feeds everything to ``analyse`` to
    produce the evaluation report.
    """
    pred_list = []
    pb_dir, label_list, label_dict, file_name_list, img_list = get_data_ckpt_local()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = '0'
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        # Resolve the input tensor and the three detection outputs from the
        # SavedModel serving signature.
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], pb_dir)
        signature = meta_graph_def.signature_def
        signature_key = 'predict_object'
        input_key = 'images'
        output_key0 = 'detection_boxes'
        output_key1 = 'detection_classes'
        output_key2 = 'detection_scores'
        x_tensor_name = signature[signature_key].inputs[input_key].name
        y_tensor_name0 = signature[signature_key].outputs[output_key0].name
        y_tensor_name1 = signature[signature_key].outputs[output_key1].name
        y_tensor_name2 = signature[signature_key].outputs[output_key2].name
        x = sess.graph.get_tensor_by_name(x_tensor_name)
        y0 = sess.graph.get_tensor_by_name(y_tensor_name0)
        y1 = sess.graph.get_tensor_by_name(y_tensor_name1)
        y2 = sess.graph.get_tensor_by_name(y_tensor_name2)
        start = time.time()
        for img in img_list:
            pred_detection_boxes, pred_detection_classes, \
                pred_detection_scores = sess.run([y0, y1, y2], {x: img})
            # Drop the leading batch dimension when outputs come batched.
            if pred_detection_boxes.ndim == 3:
                pred_detection_boxes = pred_detection_boxes[0]
                pred_detection_classes = pred_detection_classes[0]
                pred_detection_scores = pred_detection_scores[0]
            # Class ids are shifted by -1; presumably the model's ids start
            # at 1 while the index file is 0-based — confirm against export.
            pred_list.append([
                pred_detection_boxes.tolist(),
                (pred_detection_classes - 1).tolist(),
                pred_detection_scores.tolist()
            ])
        end = time.time()
        fps = len(img_list) / (end - start)

    # Report throughput as a custom metric alongside the accuracy metrics.
    diy_metric = {'fps': {'value': {'fps': fps}}}
    label_dict = json.dumps(label_dict)
    task_type = 'image_object_detection'

    # analyse produces the evaluation report consumed by the platform.
    res = analyse(
        task_type=task_type,
        pred_list=pred_list,
        label_list=label_list,
        name_list=file_name_list,
        custom_metric=diy_metric,
        label_map_dict=label_dict)



# Entry point when the script is launched directly by the evaluation job.
if __name__ == "__main__":
    evalution()
分享:

    相关文档

    相关产品

文档是否有解决您的问题?

提交成功!非常感谢您的反馈,我们会继续努力做到更好!
反馈提交失败,请稍后再试!

*必选

请至少选择或填写一项反馈信息

字符长度不能超过200

提交反馈 取消

如您有其它疑问,您也可以通过华为云社区问答频道来与我们联系探讨

智能客服提问云社区提问