import os
import sys
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from keras import backend as k
from tensorflow.python.framework.graph_util_impl import convert_variables_to_constants
import tensorflow as tf
import mrcnn.model as modellib
class outConfig(Config):
    NAME = "balloon"
    NUM_CLASSES = 1 + 1  # background + balloon
    BACKBONE = "resnet101"
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    DETECTION_MAX_INSTANCES = 100

MODEL_DIR = os.path.join(ROOT_DIR, "model/balloon/logs")  # folder where the trained model is saved
out_config = outConfig()
out_config.display()
model = modellib.MaskRCNN(mode="inference", config=out_config, model_dir=MODEL_DIR)
model_path = model.find_last()
model.load_weights(model_path, by_name=True)
session = k.get_session()
min_graph = convert_variables_to_constants(
    session, session.graph_def,
    [out.op.name for out in model.keras_model.outputs])
tf.train.write_graph(min_graph, './', 'mrcnn.pb', as_text=False)
The values in outConfig are set to match the ones used in the earlier inference run
(not sure whether that is actually correct -_-)
* If the script runs successfully, an mrcnn.pb file is created
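A quick way to sanity-check the export is to parse mrcnn.pb back into a GraphDef and print a few node names; the detection and mask output nodes listed here are the ones needed later when loading the graph. This is a minimal TF 1.x sketch, assuming the './mrcnn.pb' path written above:

# Parse the frozen graph and list the last few node names (TF 1.x)
import tensorflow as tf

with tf.gfile.GFile('./mrcnn.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

print(len(graph_def.node), 'nodes in the frozen graph')
for node in graph_def.node[-10:]:
    print(node.name)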
2. To load the frozen graph and test it
1) MASK_RCNN/mrcnn/model.py
- Add the following member function to the MaskRCNN class (numpy is already imported as np in model.py)
def detect_pb(self, images, sessd, input_image, input_image_meta, input_anchors,
              detections, mrcnn_mask, verbose=1):
    """Runs the detection pipeline against a frozen graph.

    images: List of images, potentially of different sizes.
    sessd: tf.Session that holds the imported frozen graph.
    input_image, input_image_meta, input_anchors: input tensors of the frozen graph.
    detections, mrcnn_mask: output tensors of the frozen graph.

    Returns a list of dicts, one dict per image. The dict contains:
    rois: [N, (y1, x1, y2, x2)] detection bounding boxes
    class_ids: [N] int class IDs
    scores: [N] float probability scores for the class IDs
    masks: [H, W, N] instance binary masks
    """
    assert self.mode == "inference", "Create model in inference mode."
    assert len(images) == self.config.BATCH_SIZE, \
        "len(images) must be equal to BATCH_SIZE"

    # Mold inputs to the format expected by the neural network
    molded_images, image_metas, windows = self.mold_inputs(images)

    # Validate image sizes: all images in a batch MUST be of the same size
    image_shape = molded_images[0].shape
    for g in molded_images[1:]:
        assert g.shape == image_shape, \
            "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."

    # Anchors, duplicated across the batch dimension because Keras requires it
    anchors = self.get_anchors(image_shape)
    anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)

    # Run object detection through the frozen graph instead of
    # self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
    detectionsed, mrcnn_masked = sessd.run(
        [detections, mrcnn_mask],
        feed_dict={input_image: molded_images,
                   input_image_meta: image_metas,
                   input_anchors: anchors})
    if verbose:
        print('detectionsed : ', detectionsed.shape)
        print('mrcnn_masked : ', mrcnn_masked.shape)

    # Add a leading batch dimension back to the mask output before unmolding
    mrcnn_masked = np.expand_dims(mrcnn_masked, 0)
    detections = np.array(detectionsed)
    mrcnn_mask = np.array(mrcnn_masked)

    # Process detections
    results = []
    for i, image in enumerate(images):
        final_rois, final_class_ids, final_scores, final_masks = \
            self.unmold_detections(detections[i], mrcnn_mask[i],
                                   image.shape, molded_images[i].shape,
                                   windows[i])
        results.append({
            "rois": final_rois,
            "class_ids": final_class_ids,
            "scores": final_scores,
            "masks": final_masks,
        })
    return results
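For reference, here is a rough sketch of how the frozen graph can be loaded and passed to detect_pb. This is not the exact loading code: the input tensor names ('input_image', 'input_image_meta', 'input_anchors') are the default Mask R-CNN input layer names, while the two output tensor names are assumptions; print [out.op.name for out in model.keras_model.outputs] during the freeze step and substitute your own. The MaskRCNN instance (model) from step 1 is still needed, because detect_pb uses its mold_inputs/get_anchors/unmold_detections; only the weights come from the .pb file.

import skimage.io
import tensorflow as tf

# Load the frozen graph into its own tf.Graph / tf.Session (TF 1.x)
with tf.gfile.GFile('./mrcnn.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')
sessd = tf.Session(graph=graph)

# Input/output tensors; the two output names below are assumed, check your own graph
input_image = graph.get_tensor_by_name('input_image:0')
input_image_meta = graph.get_tensor_by_name('input_image_meta:0')
input_anchors = graph.get_tensor_by_name('input_anchors:0')
detections = graph.get_tensor_by_name('mrcnn_detection/Reshape_1:0')  # assumed name
mrcnn_mask = graph.get_tensor_by_name('mrcnn_mask/Reshape_1:0')       # assumed name

# 'model' is the MaskRCNN instance from step 1; 'sample.jpg' is a placeholder image
image = skimage.io.imread('sample.jpg')
r = model.detect_pb([image], sessd, input_image, input_image_meta,
                    input_anchors, detections, mrcnn_mask)[0]
print(r['rois'].shape, r['class_ids'], r['scores'])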