
Testing Mask R-CNN with TensorFlow 1.15 (3) - freezing

카멜레온개발자 2022. 2. 7. 20:20

1. Write the following script.

balloon_freeze.py

import os
import sys

ROOT_DIR = os.path.abspath("../../")

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library

from mrcnn.config import Config
from keras import backend as k
from tensorflow.python.framework.graph_util_impl import convert_variables_to_constants
import tensorflow as tf
import mrcnn.model as modellib

class outConfig(Config):
    NAME = "balloon"
    NUM_CLASSES = 1 + 1
    BACKBONE = "resnet101"
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    DETECTION_MAX_INSTANCES = 100

MODEL_DIR = os.path.join(ROOT_DIR, "model/balloon/logs")  # folder where trained models are saved
out_config = outConfig()
out_config.display()

# Build the model in inference mode and load the most recently trained weights
model = modellib.MaskRCNN(mode="inference", config=out_config, model_dir=MODEL_DIR)
model_path = model.find_last()
model.load_weights(model_path, by_name=True)

session = k.get_session()

# Convert all variables reachable from the model outputs into constants
# and write the frozen graph to mrcnn.pb
min_graph = convert_variables_to_constants(
    session, session.graph_def,
    [out.op.name for out in model.keras_model.outputs])
tf.train.write_graph(min_graph, './', 'mrcnn.pb', as_text=False)

The values in outConfig are set to match the ones used in the earlier inference post.

(Not sure whether that is actually correct -_-)

 

* If the script runs successfully, an mrcnn.pb file is created.
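
As a quick sanity check (my own addition, not from the original post), the frozen graph can be loaded and its nodes inspected to confirm that the input/output tensors referenced in the test script below actually exist:

# Minimal sketch: verify mrcnn.pb contains the expected input/output nodes.
# The node names below match the ones used in the test script further down
# and may differ for other models.
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('mrcnn.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

node_names = {n.name for n in graph_def.node}
for name in ['input_image', 'input_image_meta', 'input_anchors',
             'mrcnn_detection/Reshape_1', 'mrcnn_mask/Sigmoid']:
    print(name, '->', 'found' if name in node_names else 'missing')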

 

 

2. To load and test the frozen model

1) MASK_RCNN/mrcnn/model.py 

  - Add the following member function to the MaskRCNN class:

    def detect_pb(self, images, sessd, input_image, input_image_meta, input_anchors, detections, mrcnn_mask, verbose=1):
        """Runs the detection pipeline.

        images: List of images, potentially of different sizes.

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"

        # if verbose:
        #     log("Processing {} images".format(len(images)))
        #     for image in images:
        #         log("image", image)

        # Mold inputs to format expected by the neural network
        molded_images, image_metas, windows = self.mold_inputs(images)

        #print('molded_images : ', molded_images)
        #print('image_metas : ', image_metas)
        #print('windows : ', windows)

        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        # print(image_shape, molded_images.shape)
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."

        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)

        # if verbose:
        #     log("molded_images", molded_images)
        #     log("image_metas", image_metas)
        #     log("anchors", anchors)
        # Run object detection
        # detections, _, _, mrcnn_mask, _, _, _ =\
        #     self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
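        # Run the frozen graph directly through the TF session instead of keras_model.predict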
        detectionsed, mrcnn_masked = sessd.run([detections, mrcnn_mask], feed_dict = {input_image: molded_images, \
                                                                input_image_meta: image_metas, \
                                                                input_anchors: anchors})

        print('detectionsed : ', detectionsed.shape)
        print('mrcnn_masked : ', mrcnn_masked.shape)
        # Add a batch dimension to the masks so the per-image indexing below works
        mrcnn_masked = np.expand_dims(mrcnn_masked, 0)
        detections = np.array(detectionsed)
        mrcnn_mask = np.array(mrcnn_masked)
        
        print('detections : ', detections.shape)
        print('mrcnn_mask : ', mrcnn_mask.shape)
        
        # Process detections
        results = []
        for i, image in enumerate(images):
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       windows[i])
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results

 

3. To test the generated pb file, write the following script.

import os
import sys

ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR)  # To find local version of the library


import cv2
import tensorflow as tf
import mrcnn.model as modellib
from mrcnn.config import Config

class outConfig(Config):
    NAME = "balloon"
    NUM_CLASSES = 1 + 1
    BACKBONE = "resnet101"
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    DETECTION_MAX_INSTANCES = 100

MODEL_DIR = os.path.join(ROOT_DIR, "model/balloon/logs")  # folder where trained models are saved

out_config = outConfig()
out_config.display()
#results = model.detect([img], verbose=1)

def load_detection_model(model):
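    # Load the frozen graph from the given .pb file into a dedicated tf.Graph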
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        # Input placeholders and output tensors of the frozen Mask R-CNN graph
        input_image = tf.get_default_graph().get_tensor_by_name('input_image:0')
        input_image_meta = tf.get_default_graph().get_tensor_by_name('input_image_meta:0')
        input_anchors = tf.get_default_graph().get_tensor_by_name('input_anchors:0')
        detections = tf.get_default_graph().get_tensor_by_name('mrcnn_detection/Reshape_1:0')
        mrcnn_mask = tf.get_default_graph().get_tensor_by_name('mrcnn_mask/Sigmoid:0')
    sessd = tf.Session(config=config, graph=detection_graph)
    print('Loaded detection model from file "%s"' % model)
    return sessd, input_image, input_image_meta, input_anchors, detections, mrcnn_mask

 

# The MaskRCNN wrapper is still built in inference mode so that its
# mold_inputs/unmold_detections helpers can be reused; the actual forward
# pass runs through the frozen graph loaded below.
model = modellib.MaskRCNN(mode="inference", config=out_config, model_dir=MODEL_DIR)

model_path = 'mrcnn.pb'

image = cv2.imread("D:/DL/Mask_RCNN/model/balloon/datasets/train/53500107_d24b11b3c2_b.jpg")
image = cv2.resize(image, (1024, 1024), interpolation=cv2.INTER_AREA)
sessd, input_image, input_image_meta, input_anchors, detections, mrcnn_mask = load_detection_model(model_path)
results = model.detect_pb([image], sessd, input_image, input_image_meta, input_anchors, detections, mrcnn_mask, verbose=1)

#print('rois : ' , results[0]['rois'])
#print('class_ids : ' , results[0]['class_ids'])
#print('scores  : ', results[0]['scores'])
#print('masks  : ', results[0]['masks'].shape)

print(results[0]['masks'].shape)
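
To visualize the result (an optional sketch of my own, assuming the standard Mask_RCNN visualize module and a two-class balloon model), the returned rois/masks/class_ids/scores can be passed to display_instances; note that cv2 loads images in BGR order, so convert to RGB first:

# Optional: draw the detections (assumes mrcnn.visualize is available and the
# model was trained on the two-class balloon dataset).
from mrcnn import visualize

r = results[0]
class_names = ['BG', 'balloon']  # assumed class list for the balloon dataset
visualize.display_instances(image[:, :, ::-1],  # BGR -> RGB for display
                            r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])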