import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout

# Import a training dataset.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_train.shape)

# Define a model network.
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(units=5120, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=10, activation='softmax'))

# Define an optimizer and loss functions.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.summary()
# Train the model.
model.fit(x_train, y_train, epochs=2)
# Evaluate the model.
model.evaluate(x_test, y_test)
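Before saving the model, a quick spot check on a few test samples confirms that the trained network predicts sensibly. The following is a minimal sketch that reuses the model, x_test, and y_test defined above; the numpy import is the only addition:

import numpy as np

# Predict the first five test digits and compare against the ground-truth labels.
probabilities = model.predict(x_test[:5])       # shape: (5, 10)
predictions = np.argmax(probabilities, axis=1)  # most likely class per sample
print('predicted:', predictions)
print('expected: ', y_test[:5])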
from keras import backend as K

# K.get_session().run(tf.global_variables_initializer())

# Define the inputs and outputs of the prediction API.
# The key values of the inputs and outputs dictionaries are used as the index
# keys for the input and output tensors of the model.
# The input and output definitions of the model must match the custom inference script.
predict_signature = tf.saved_model.signature_def_utils.predict_signature_def(
    inputs={"images": model.input},
    outputs={"scores": model.output})

# Define a save path.
builder = tf.saved_model.builder.SavedModelBuilder('./mnist_keras/')

builder.add_meta_graph_and_variables(
    sess=K.get_session(),
    # The tf.saved_model.tag_constants.SERVING tag is required for inference and deployment.
    tags=[tf.saved_model.tag_constants.SERVING],
    # signature_def_map may contain only a single item; otherwise the corresponding key
    # must be tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.
    signature_def_map={
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature
    })
builder.save()
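To confirm that the export succeeded, the SavedModel can be loaded back into a fresh graph and queried through its default signature. This is a minimal sketch under the same TensorFlow 1.x APIs used above; the all-zero dummy input is only a stand-in to check tensor shapes:

import numpy as np
import tensorflow as tf

with tf.Graph().as_default():
    with tf.Session() as sess:
        # Load the graph and variables saved under the SERVING tag.
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], './mnist_keras/')
        # Look up the input/output tensor names from the default signature.
        signature = meta_graph.signature_def[
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        x_name = signature.inputs['images'].name
        y_name = signature.outputs['scores'].name
        # Run one dummy 28x28 sample through the restored model.
        dummy = np.zeros((1, 28, 28), dtype=np.float32)
        scores = sess.run(y_name, feed_dict={x_name: dummy})
        print('scores shape:', scores.shape)  # expected: (1, 10)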
from __future__ import print_function

import gzip
import os

import numpy
import tensorflow as tf
from six.moves import urllib

# Training data is obtained from the Yann LeCun official website http://yann.lecun.com/exdb/mnist/.
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000


def maybe_download(filename, work_directory):
    """Download the data from Yann's website, unless it's already here."""
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not os.path.exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        statinfo = os.stat(filepath)
        print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size))
    return filepath


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting %s' % filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


def extract_labels(filename, one_hot=False):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting %s' % filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, filename))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return dense_to_one_hot(labels)
        return labels


class DataSet(object):
    """Class encompassing test, validation and training MNIST data set."""

    def __init__(self, images, labels, fake_data=False, one_hot=False):
        """Construct a DataSet.
        one_hot arg is used only if fake_data is true."""
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            if self.one_hot:
                fake_label = [1] + [0] * 9
            else:
                fake_label = 0
            return ([fake_image for _ in range(batch_size)],
                    [fake_label for _ in range(batch_size)])
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir, fake_data=False, one_hot=False):
    """Return training, validation and testing data sets."""

    class DataSets(object):
        pass

    data_sets = DataSets()
    if fake_data:
        data_sets.train = DataSet([], [], fake_data=True, one_hot=one_hot)
        data_sets.validation = DataSet([], [], fake_data=True, one_hot=one_hot)
        data_sets.test = DataSet([], [], fake_data=True, one_hot=one_hot)
        return data_sets
    local_file = maybe_download(TRAIN_IMAGES, train_dir)
    train_images = extract_images(local_file)
    local_file = maybe_download(TRAIN_LABELS, train_dir)
    train_labels = extract_labels(local_file, one_hot=one_hot)
    local_file = maybe_download(TEST_IMAGES, train_dir)
    test_images = extract_images(local_file)
    local_file = maybe_download(TEST_LABELS, train_dir)
    test_labels = extract_labels(local_file, one_hot=one_hot)
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    data_sets.train = DataSet(train_images, train_labels)
    data_sets.validation = DataSet(validation_images, validation_labels)
    data_sets.test = DataSet(test_images, test_labels)
    return data_sets


training_iteration = 1000
modelarts_example_path = './modelarts-mnist-train-save-deploy-example'
export_path = modelarts_example_path + '/model/'
data_path = './'

print('Training model...')
mnist = read_data_sets(data_path, one_hot=True)
sess = tf.InteractiveSession()
serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32)}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign a name
y_ = tf.placeholder('float', shape=[None, 10])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
values, indices = tf.nn.top_k(y, 10)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
    tf.constant([str(i) for i in range(10)]))
prediction_classes = table.lookup(tf.to_int64(indices))
for _ in range(training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
print('training accuracy %g' % sess.run(
    accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
print('Done training!')
# Export the model.
# The model needs to be saved using the saved_model API.
print('Exporting trained model to', export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)

tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
tensor_info_y = tf.saved_model.utils.build_tensor_info(y)

# Define the inputs and outputs of the prediction API.
# The key values of the inputs and outputs dictionaries are used as the index
# keys for the input and output tensors of the model.
# The input and output definitions of the model must match the custom inference script.
prediction_signature = (
    tf.saved_model.signature_def_utils.build_signature_def(
        inputs={'images': tensor_info_x},
        outputs={'scores': tensor_info_y},
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

builder.add_meta_graph_and_variables(
    # Set tag to serve/tf.saved_model.tag_constants.SERVING.
    sess, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={
        'predict_images': prediction_signature,
    },
    legacy_init_op=legacy_init_op)

builder.save()
print('Done exporting!')
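As with the Keras export, the saved directory can be reloaded to verify the 'predict_images' signature before deployment. The following is a minimal sketch under the same TensorFlow 1.x assumptions, reusing the export_path and mnist data sets already defined in this script:

with tf.Graph().as_default():
    with tf.Session() as verify_sess:
        # Load the exported graph and variables under the SERVING tag.
        meta_graph = tf.saved_model.loader.load(
            verify_sess, [tf.saved_model.tag_constants.SERVING], export_path)
        # Resolve the tensors named by the 'predict_images' signature.
        signature = meta_graph.signature_def['predict_images']
        x_name = signature.inputs['images'].name
        y_name = signature.outputs['scores'].name
        # Score one test image through the restored model.
        scores = verify_sess.run(
            y_name, feed_dict={x_name: mnist.test.images[:1]})
        print('scores shape:', scores.shape)  # expected: (1, 10)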
Inference Code (Keras and tf APIs)
In the model inference code file customize_service.py, add a child model class that inherits from the corresponding parent model class. For details about the import statements for the different types of parent model classes, see Table 1.
from PIL import Image
import numpy as np

from model_service.tfserving_model_service import TfServingBaseService


class MnistService(TfServingBaseService):

    # Match the model input with the user's HTTPS API input during preprocessing.
    # The model input corresponding to the preceding training part is {"images": <array>}.
    def _preprocess(self, data):
        preprocessed_data = {}
        images = []
        # Iterate over the input data.
        for k, v in data.items():
            for file_name, file_content in v.items():
                image1 = Image.open(file_content)
                image1 = np.array(image1, dtype=np.float32)
                image1.resize((1, 784))
                images.append(image1)
        # Return the numpy array.
        images = np.array(images, dtype=np.float32)
        # Perform batch processing on multiple input samples and ensure that
        # the shape is the same as the shape used during training.
        images.resize((len(images), 784))
        preprocessed_data['images'] = images
        return preprocessed_data

    # The inference itself is handled by invoking the parent class.
    # The output corresponding to model saving in the preceding training part is {"scores": <array>}.
    # Postprocess the HTTPS output.
    def _postprocess(self, data):
        infer_output = {"mnist_result": []}
        # Iterate over the model output.
        for output_name, results in data.items():
            for result in results:
                infer_output["mnist_result"].append(result.index(max(result)))
        return infer_output
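Because _preprocess and _postprocess do not touch any service state, they can be exercised locally without deploying a model. The sketch below is illustrative only: it bypasses __init__ (which normally wires up the model), it assumes the model_service package is importable, and the file name, in-memory PNG, and score vector are made-up stand-ins.

import io

# Build a fake request: one form field holding one in-memory 28x28 PNG file.
buffer = io.BytesIO()
Image.new('L', (28, 28)).save(buffer, format='PNG')
buffer.seek(0)
request_data = {'images': {'digit.png': buffer}}

service = MnistService.__new__(MnistService)  # skip __init__: no model needed here
inputs = service._preprocess(request_data)
print(inputs['images'].shape)  # expected: (1, 784)

# Fake model output: one score vector whose maximum sits at class 3.
fake_scores = {'scores': [[0.0, 0.1, 0.0, 0.7, 0.0, 0.1, 0.0, 0.1, 0.0, 0.0]]}
print(service._postprocess(fake_scores))  # {'mnist_result': [3]}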