Example 1
# imports assumed by this snippet
from tensorflow.keras import layers, models
from tensorflow.keras.applications import (VGG16, VGG19, InceptionResNetV2,
                                           InceptionV3, ResNet50)


def build_model(model_name, img_size, num_classes):
    """
    Build a classification model, optionally on a pre-trained convolutional base.

    Arguments
        model_name : (str) model name, used to select the pre-trained base ('dummy' for a plain baseline).
        img_size : (int) image size, used for both width and height.
        num_classes : (int) number of classes for the model's final layer.

    Returns
        model : a tensorflow model object.

    """
    image_shape = (img_size, img_size, 3)

    # Initialize model
    model = models.Sequential()

    # Load model
    if model_name == 'dummy':
        model.add(
            layers.MaxPooling2D(pool_size=(4, 4), input_shape=image_shape))

    else:
        print('[KF INFO] Loading pre-trained model ...')
        if model_name == 'VGG16':
            if img_size > 224:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 224!"
                    % model_name)
            conv = VGG16(weights='imagenet',
                         include_top=False,
                         input_shape=image_shape)
        elif model_name == 'VGG19':
            if img_size > 224:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 224!"
                    % model_name)
            conv = VGG19(weights='imagenet',
                         include_top=False,
                         input_shape=image_shape)
        elif model_name == 'InceptionResNetV2':
            if img_size > 299:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 299!"
                    % model_name)
            conv = InceptionResNetV2(weights='imagenet',
                                     include_top=False,
                                     input_shape=image_shape)
        elif model_name == 'InceptionV3':
            if img_size > 299:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 299!"
                    % model_name)
            conv = InceptionV3(weights='imagenet',
                               include_top=False,
                               input_shape=image_shape)
        elif model_name == 'ResNet50':
            if img_size > 224:
                raise Exception(
                    "[KF ERROR] For %s model, the input image size cannot be larger than 224!"
                    % model_name)
            conv = ResNet50(weights='imagenet',
                            include_top=False,
                            input_shape=image_shape)
        else:
            raise Exception(
                "[KF ERROR] Unknown model name '%s'; cannot load a pre-trained model!"
                % model_name)

        print(
            "[KF INFO] The pretrained model %s's convolutional part is loaded ..."
            % model_name)
        model.add(conv)

    # Add top layers
    fc_size = 256
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(fc_size, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes, activation='softmax'))

    return model
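A minimal usage sketch for build_model; the compile settings below are an assumption, not part of the original.

# Hypothetical usage: a VGG16-based classifier for 10 classes.
model = build_model('VGG16', img_size=224, num_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()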
Example 2
def train(x_train, y_train, vocab_processor, x_dev, y_dev):
    # train using keras : resnet
    # ==================================================

    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=False)
        sess = tf.Session(config=session_conf)
        print('backend',K.backend())
        #K.set_session(sess) # connect Keras backend

        with sess.as_default():
            with tf.device('/gpu:0'):
                model = ResNet50(include_top=True, weights=None, classes=y_train.shape[1], input_shape=(x_train.shape[1], embedding_dim, 1))

            # Define Training procedure
            # keras implementation model compile
            optimizer = tf.keras.optimizers.Adam(0.001)

            model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

            #model.summary()
            
            # global_step = tf.Variable(0, name="global_step", trainable=False)
            # optimizer = tf.train.AdamOptimizer(1e-3)
            # grads_and_vars = optimizer.compute_gradients(cnn.loss)
            # train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

            # Output directory for models and summaries
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", checkpoint_directory))
            print("Writing to {}\n".format(out_dir))

            # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.all_variables())

            sequence_length = x_train.shape[1]

            # Write vocabulary
            vocab_processor.save(os.path.join(out_dir, "vocab"))

            with tf.device('/cpu:0'), tf.name_scope("embedding"):
                W = tf.Variable(
                    tf.random_uniform([len(vocab_processor.vocabulary_), embedding_dim], -1.0, 1.0),
                    name="W",
                    trainable=True)

            # Initialize all variables
            sess.run(tf.initialize_all_variables())

            if embedding == "word2vec":
               # initial matrix with random uniform
                initW = np.random.uniform(-0.25,0.25,(len(vocab_processor.vocabulary_), embedding_dim))
                # load any vectors from the word2vec
                print("Embed word using {}\n".format(embedding))
                with open("./embedding/GoogleNews-vectors-negative300.bin", "rb") as f:
                    header = f.readline()
                    vocab_size, layer1_size = map(int, header.split())  # 3000000, 300
                    binary_len = np.dtype('float32').itemsize * layer1_size # 1200
                    # print(vocab_size, layer1_size)
                    for line in range(vocab_size):
                        # print(line)
                        word = []
                        while True:
                            ch = f.read(1).decode('latin-1')
                            if ch == ' ':
                                word = ''.join(word)
                                break
                            if ch != '\n':
                                word.append(ch)
                            else:
                                print('else: ', word, ch)
                        # print(word)
                        idx = vocab_processor.vocabulary_.get(word)
                        # print("value of idx is" + str(idx));
                        if idx != 0:
                            # print("came to if")
                            initW[idx] = np.fromstring(f.read(binary_len), dtype='float32')
                        else:
                            # print("came to else");
                            f.read(binary_len)
                sess.run(W.assign(initW))  # run the assign op; creating it alone does not update W
                print("Ended")

            def train_step(x_batch, y_batch, step):
                """
                A single training step
                """
                x_batch = np.asarray(x_batch)
                x_batch = tf.expand_dims(tf.nn.embedding_lookup(W, x_batch), -1)
                y_batch = tf.argmax(np.array(y_batch), axis=1)
                y_batch = tf.reshape(y_batch, [-1,1])
                
                loss, accuracy = model.train_on_batch(x_batch, y_batch)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))

            def dev_step(x_batch, y_batch, step):
                """
                Evaluates model on a dev set
                """
                x_batch = np.asarray(x_batch)
                x_batch = tf.expand_dims(tf.nn.embedding_lookup(W, x_batch), -1)
                y_batch = tf.argmax(np.array(y_batch), axis=1)
                y_batch = tf.reshape(y_batch, [-1,1])

                loss, accuracy = model.test_on_batch(x_batch, y_batch)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))

            # Generate batches
            batches = data_helpers.batch_iter(
                list(zip(x_train, y_train)), batch_size, num_epochs)
            # Training loop. For each batch...
            current_step = 0
            #with tf.device('/gpu:0'):
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                current_step += 1
                train_step(x_batch, y_batch, current_step)
                #current_step = tf.train.global_step(sess, global_step)
                if current_step % evaluate_every == 0:
                    print("\nEvaluation:")
                    dev_step(x_dev, y_dev, current_step)
                    print("")
                if current_step % checkpoint_every == 0:
                    #path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    path = "{}-{}".format(checkpoint_prefix, current_step)
                    model.save(path)
                    print("Saved model checkpoint to {}\n".format(path))
Example 3
for img_path in img_paths:
    if 'upright' in img_path:
      print("Sample Upright Image")
      display(Image.open(img_path))
    else:
      print("Sample Sideways Image")
      display(Image.open(img_path))


# Model to detect whether a dog image is upright or sideways, using transfer learning

num_classes = 2


model = Sequential()
model.add(ResNet50(include_top=False, pooling='avg', weights='imagenet'))
model.add(Dense(num_classes, activation='softmax'))

model.layers[0].trainable = False

model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

image_size = 224
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                    width_shift_range=0.2,
                                    height_shift_range=0.2)
train_generator = data_generator.flow_from_directory(
                                        directory='/content/drive/My Drive/Transfer_Learning/dogsvscats/train',
                                        target_size=(image_size, image_size),
                                        batch_size=10,
                                        class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
                                        directory='/content/drive/My Drive/Transfer_Learning/dogsvscats/val',
Example 4
           "GallbladderDissection":3, "GallbladderPackaging":4, "CleaningCoagulation":5, "GallbladderRetraction":6}


num_classes = 7

# just rename some variables
frames = 5
channels = 3
rows = 224
columns = 224 


video = Input(shape=(frames,rows,columns,channels))
#cnn_base = VGG16(input_shape=(rows,columns,channels),
cnn_base = ResNet50(input_shape=(rows, columns, channels),
                 weights="imagenet",
                 include_top=False)
                             

cnn_out = GlobalAveragePooling2D()(cnn_base.output)

cnn = Model(inputs=cnn_base.input, outputs=cnn_out)

#cnn.trainable = True

#Use transfer learning: freeze everything except the last 18 layers
for layer in cnn.layers[:-18]:
    layer.trainable = False


cnn.summary()
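The video input defined above is unused in this excerpt; a plausible continuation (an assumption, not the author's code) wraps the frame-level CNN in TimeDistributed and adds a recurrent head:

from tensorflow.keras.layers import TimeDistributed, LSTM, Dense
from tensorflow.keras.models import Model

# run the frame CNN on each of the 5 frames, then aggregate over time
encoded_frames = TimeDistributed(cnn)(video)   # (None, frames, 2048)
encoded_video = LSTM(256)(encoded_frames)      # (None, 256)
outputs = Dense(num_classes, activation='softmax')(encoded_video)
video_model = Model(inputs=video, outputs=outputs)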
Example 5
                "--filter",
                type=str,
                default=None,
                help="comma separated list of ImageNet labels to filter on")
args = vars(ap.parse_args())

# grab the label filters command line argument
labelFilters = args["filter"]

# if the label filter is not empty, break it into a list
if labelFilters is not None:
    labelFilters = labelFilters.lower().split(",")

# load ResNet from disk (with weights pre-trained on ImageNet)
print("[INFO] loading ResNet...")
model = ResNet50(weights="imagenet")

# load the input image from disk and grab its dimensions
image = cv2.imread(args["image"])
(H, W) = image.shape[:2]

# run selective search on the input image
print("[INFO] performing selective search with '{}' method...".format(
    args["method"]))
rects = selective_search(image, method=args["method"])
print("[INFO] {} regions found by selective search".format(len(rects)))

# initialize the list of region proposals that we'll be classifying
# along with their associated bounding boxes
proposals = []
boxes = []
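The excerpt stops after initializing the lists; a sketch of the loop that typically fills them follows (assumed, and it presumes img_to_array and preprocess_input are imported from tensorflow.keras elsewhere in the script):

for (x, y, w, h) in rects:
    # skip region proposals that are too small relative to the image
    if w / float(W) < 0.1 or h / float(H) < 0.1:
        continue
    # extract the region, convert BGR -> RGB, and resize to the CNN input size
    roi = image[y:y + h, x:x + w]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
    roi = cv2.resize(roi, (224, 224))
    roi = preprocess_input(img_to_array(roi))
    proposals.append(roi)
    boxes.append((x, y, x + w, y + h))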
Example 6
from tensorflow.keras import preprocessing
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Input
from tensorflow.keras.layers.experimental.preprocessing import Rescaling



# create resnet model w imgnet weights
resnet_model = ResNet50(weights='imagenet', 
                        include_top=False, 
                        input_shape=(224, 224, 3),
                        pooling=None)
resnet_model.summary()

model = Sequential()

model.add(Rescaling(1./255, input_shape=(224, 224, 3)))
for layer in resnet_model.layers:
    layer.trainable = True
model.add(resnet_model)
model.add(GlobalAveragePooling2D())
model.add(Dense(1, activation='sigmoid'))

#model.load_weights(r'E:\Babette\MasterThesis\Models\ResNet_imgnet_trainable_full\resnet_model1_finetuned.h5')
model.load_weights(r'resnet_model1_finetuned.h5')
model.summary()
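A hypothetical single-image inference with the loaded model ('example.jpg' is a placeholder path); the Rescaling layer handles the division by 255:

import numpy as np

img = load_img('example.jpg', target_size=(224, 224))
x = np.expand_dims(img_to_array(img), axis=0)  # (1, 224, 224, 3), values in [0, 255]
prob = model.predict(x)[0][0]                  # sigmoid output in [0, 1]
print('positive-class probability: %.3f' % prob)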
Example 7

#Print the current working directory for a quick sanity check
cwd = os.getcwd()
#Use when running on local runtime
cwd = os.path.join(cwd, 'Data')
print(cwd)
#Get the variables
train_pairs = get_image_pair_fnames(cwd, 'train')
val_pairs = get_image_pair_fnames(cwd, 'val')
params = {'dims': (256, 256, 3), 'batch_size': 32, 'shuffle': True}
train_datagen = DataGenerator(train_pairs, **params)
val_datagen = DataGenerator(val_pairs, **params)
print('ho')
#Now we build the model
#We begin by defining the ResNet50 backbone to be used in the network
pre_trained_model = ResNet50(input_shape=(256, 256, 3),
                             include_top=False,
                             weights=None)
output = Model_PSP(pre_trained_model, num_classes=35)
#We plot the model for better clarity
#Compile the model and check summary/plot it
model = models.Model(pre_trained_model.input, output)
model.compile(optimizer='adam',
              loss="categorical_crossentropy",
              metrics=['acc', iou_coef, dice_coef])

history = model.fit(train_datagen, validation_data=val_datagen, epochs=30)
metric_disp(history)
disp_samples(6)
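iou_coef and dice_coef are referenced above but not defined in this excerpt; a common Keras-backend formulation (an assumption, not the author's code) is:

from tensorflow.keras import backend as K

def iou_coef(y_true, y_pred, smooth=1.0):
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3]) - intersection
    return K.mean((intersection + smooth) / (union + smooth))

def dice_coef(y_true, y_pred, smooth=1.0):
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    denom = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2.0 * intersection + smooth) / (denom + smooth))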
Example 8
datalist = read_from_annfile(root, annfile, y_range=y_range, stack_length=1)
dataset = build_dataset_from_slices(*datalist,
                                    batch_size=batch_size,
                                    shuffle=True)

data_size = tf.data.experimental.cardinality(dataset).numpy()
val_dataset = dataset.take(int(0.3 * data_size))
train_dataset = dataset.skip(int(0.3 * data_size))
STEP_SIZE_TRAIN = tf.data.experimental.cardinality(train_dataset).numpy()

# %% Build and compile model
n_mae = normalize_mae(y_nums)  # make mae loss normalized into range 0 - 100.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    backbone = ResNet50(weights=None,
                        input_shape=(224, 224, 3),
                        pooling='avg',
                        include_top=False)
    x = backbone.output
    x = Dense(64, activation='relu', kernel_initializer='he_uniform')(x)
    x = Dropout(0.5)(x)
    x = Dense(32, activation='relu', kernel_initializer='he_uniform')(x)
    x = Dropout(0.5)(x)
    output = Dense(1, activation='relu', kernel_initializer='he_uniform')(x)
    model = Model(backbone.input, output)
    model_checkpoint = ModelCheckpoint(
        str(models_path.joinpath('{epoch:02d}-{val_n_mae:.2f}.h5')),
        period=1)
    lr_sche = LearningRateScheduler(lr_schedule)
    model.compile(loss=loss,
                  optimizer=tf.keras.optimizers.Adam(0.0001,
                                                     decay=1e-3 /
Example 9
                '--visualize',
                type=int,
                default=-1,
                help='whether or not show extra visualization for debugging')
args = vars(ap.parse_args())

#initialize variables for object detection
WIDTH = 600
PYR_SCALE = 1.5
WIN_STEP = 16
ROI_SIZE = eval(args['size'])
INPUT_SIZE = (224, 224)

#loading network weights
print('Loading network ...')
model = ResNet50(weights='imagenet', include_top=True)

#load input image, resize it and then grab its dimensions
orig = cv2.imread(args['image'])
orig = imutils.resize(orig, width=WIDTH)
(H, W) = orig.shape[:2]

#initialize image pyramid
pyramid = image_pyramid(orig, scale=PYR_SCALE, minSize=ROI_SIZE)

#initializing two lists, one for ROIs generated from image pyramid and sliding window, and another to store
#(x, y) coordinates of ROI
rois = []
locs = []

#measuring how much time it takes to loop over image pyramid and sliding window
Example 10
def load_model():
	global model
	model = ResNet50(weights="imagenet")
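A hypothetical caller for this global-model pattern, which is common in web-serving code ('example.jpg' is a placeholder):

import numpy as np
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import load_img, img_to_array

load_model()  # populates the global 'model'
x = img_to_array(load_img('example.jpg', target_size=(224, 224)))
preds = model.predict(preprocess_input(np.expand_dims(x, axis=0)))
print(decode_predictions(preds, top=3)[0])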
Example 11
print(metrics.accuracy_score(result, testY))
from sklearn.metrics import classification_report
print(classification_report(result, testY))


# Ensemble using pre-trained models
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array, load_img

from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.densenet import DenseNet121

E_model_Res = ResNet50(weights = 'imagenet', include_top = False, input_shape = (32,32,3))
E_model_Dense = DenseNet121(weights = 'imagenet', include_top = False, input_shape = (32,32,3))


print('Number of trainable weights before freezing the conv base:', len(E_model_Res.trainable_weights))
E_model_Res.trainable = False
print('Number of trainable weights after freezing the conv base:', len(E_model_Res.trainable_weights))


print('Number of trainable weights before freezing the conv base:', len(E_model_Dense.trainable_weights))
E_model_Dense.trainable = False
print('Number of trainable weights after freezing the conv base:', len(E_model_Dense.trainable_weights))


E_model1 = models.Sequential()
E_model1.add(E_model_Res)
Example 12
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D

num_classes = 2
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

my_new_model = Sequential()
my_new_model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))
my_new_model.add(Dense(num_classes, activation='softmax'))

my_new_model.layers[0].trainable = False

my_new_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])


# Specify which type of ImageDataGenerator above is to load in training data
train_generator = data_generator_with_aug.flow_from_directory(
        directory = '../input/dogs-gone-sideways/images/train',
        target_size=(image_size, image_size),
        batch_size=12,
        class_mode='categorical')

# Specify which type of ImageDataGenerator above is to load in validation data
validation_generator = data_generator_no_aug.flow_from_directory(
        directory = '../input/dogs-gone-sideways/images/val',
        target_size=(image_size, image_size),
        class_mode='categorical')

my_new_model.fit_generator(
        train_generator, # if you don't know what argument goes first, try the hint
Example 13

# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.20,
                                                  stratify=labels,
                                                  random_state=42)
# initialize the training data augmentation object
trainAug = ImageDataGenerator(rotation_range=15,
                              fill_mode="nearest",
                              preprocessing_function=preprocess_input)

#Let's try RESNET50

base_model = ResNet50(weights=None,
                      include_top=False,
                      input_shape=(224, 224, 3))

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.6)(x)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)

#Compile the model
print("[INFO] compiling the model...")
opt = Adam(lr=learning_rate, decay=learning_rate / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

#Training the model
history = model.fit_generator(trainAug.flow(trainX,
Example 14

def quantize_aware_training(args):

    if not (args.evaluate):

        if args.pretrained:
            fp_model = ResNet50()

        #Weights should be loaded into the QAT model
        # else:
        #   fp_model = ResNet50(weights=None)
        #   if(args.model_dir is None):
        #     raise Exception('Checkpoint path is not given')
        #   else:
        #     fp_model.load_weights(args.model_dir)

        fp_model.summary()

        qat_model = quantize_aware_model(fp_model, args.num_bits)

        qat_model.summary()
        mode = 'train'

        train_generator = data_generator(mode,
                                         args.data,
                                         args.batch_size,
                                         args.validation_split,
                                         subset='training',
                                         augment=True)

        #Note : validation data should be used from train data.
        #       This must be test generator for evaluation

        validation_generator = data_generator(mode,
                                              args.data,
                                              args.batch_size,
                                              args.validation_split,
                                              subset='validation')
        # validation_generator =None

        ## TODO: find a better way to name run directories
        current_train_dir = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        current_path = os.path.join(args.output_dir, current_train_dir)
        if not os.path.exists(current_path):
            os.makedirs(current_path)

        ckpt_path = os.path.join(current_path, 'checkpoints')
        if not os.path.exists(ckpt_path):
            os.makedirs(ckpt_path)

        train(qat_model,
              train_generator,
              args.batch_size,
              validation_generator,
              epochs=args.epochs,
              learning_rate=args.learning_rate,
              ckpt_path=ckpt_path,
              args=args)

        if True:
            model_path = os.path.join(current_path, 'saved_model')

            if not os.path.exists(model_path):
                os.makedirs(model_path)

            save_model(qat_model, model_path)

    else:
        evaluate(args.model_dir, args)
Example 15
def train():
    """The main function that runs training"""

    ## data

    dataset_dir = './dataset/photo'
    image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = \
        get_dataset(dataset_dir, is_training=True)

    data_queue = tf.RandomShuffleQueue(capacity=32, min_after_dequeue=0,
                                       dtypes=(
                                           image.dtype, ih.dtype, iw.dtype,
                                           gt_boxes.dtype, gt_masks.dtype,
                                           num_instances.dtype, img_id.dtype))
    enqueue_op = data_queue.enqueue((image, ih, iw, gt_boxes, gt_masks, num_instances, img_id))
    data_queue_runner = tf.train.QueueRunner(data_queue, [enqueue_op] * 2)
    tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, data_queue_runner)
    (image, ih, iw, gt_boxes, gt_mask, num_instances, img_id) = data_queue.dequeue()  # only image, gt_mask, and img_id are used below
    im_shape = tf.shape(image)
    image = tf.reshape(image, (im_shape[0], im_shape[1], im_shape[2], 3))
    gt_mask = tf.cast(gt_mask, tf.float32)

    image = tf.image.resize_bilinear(image, [448, 448])
    gt_mask = tf.expand_dims(gt_mask, axis=3)
    gt_mask = tf.image.resize_bilinear(gt_mask, [56, 56])

    model = ResNet50(input_tensor=image, include_top=False, weights=None, pooling='max')
    model.trainable = True
    selected_model1 = Model(inputs=model.input,
                            outputs=model.get_layer('activation_48').output)  # output=(None, 14, 14, 2048)
    selected_model2 = Model(inputs=model.input,
                            outputs=model.get_layer('activation_39').output)  # output=(None, 28, 28, 1024)
    selected_model3 = Model(inputs=model.input,
                            outputs=model.get_layer('activation_21').output)  # output=(None, 56, 56, 512)
    selected_model4 = Model(inputs=model.input,
                            outputs=model.get_layer('activation_9').output)  # output=(None, 112, 112, 256)
    # build the pyramid from selected_model1 through selected_model3 only

    y1 = selected_model1.output
    y2 = selected_model2.output
    y3 = selected_model3.output
    y4 = selected_model4.output

    pyramid_1 = Conv2D(filters=256, strides=(1, 1), kernel_size=(1, 1), activation='relu', padding='same', name="P1")(
        y1)  # (None, 14, 14, 256)

    pre_P2_1 = tf.image.resize_bilinear(pyramid_1, [28, 28], name='pre_P2_1')  # (None, 28, 28, 256)
    pre_P2_2 = Conv2D(filters=256, strides=(1, 1), kernel_size=(1, 1), activation='relu', padding='same',
                      name="pre_p2_2")(y2)  # (None, 28, 28, 256)
    pre_P2 = tf.add(pre_P2_1, pre_P2_2)  # (None, 28, 28, 256)
    pyramid_2 = Conv2D(filters=256, strides=(1, 1), kernel_size=(3, 3), activation='relu', padding='same', name="P2")(
        pre_P2)  # (None, 28, 28, 256)

    pre_P3_1 = tf.image.resize_bilinear(pyramid_2, [56, 56], name='pre_P3_1')  # (None, 56, 56, 256)
    pre_P3_2 = Conv2D(filters=256, strides=(1, 1), kernel_size=(1, 1), activation='relu', padding='same',
                      name="pre_p3_2")(y3)  # (None, 56, 56, 256)
    pre_P3 = tf.add(pre_P3_1, pre_P3_2)  # (None, 56, 56, 256)
    pyramid_3 = Conv2D(filters=256, strides=(1, 1), kernel_size=(3, 3), activation='relu', padding='same', name="P3")(
        pre_P3)  # (None, 56, 56, 256)
    m = pyramid_3

    # pre_P4_1 = tf.image.resize_bilinear(pyramid_2, [112,112], name='pre_P4_1')                                                 #(None, 112, 112, 256)
    # pre_P4_2 = Conv2D(filters=256, strides=(1, 1), kernel_size=(1, 1), activation='relu', padding='same', name="pre_p4_2")(y4)   #(None, 112, 112, 256)
    # pre_P4=tf.add(pre_P4_1,pre_P4_2)                                                                                             #(None, 112, 112, 256)
    # pyramid_4=Conv2D(filters=256, strides=(1, 1), kernel_size=(3, 3), activation='relu', padding='same', name="P4")(pre_P4)      #(None, 112, 112, 256)

    for i in range(4):
        m = Conv2D(filters=256, strides=(1, 1), kernel_size=(3, 3), activation='relu', padding='same',
                   name="C1_%d" % i)(m)  # (None, 56, 56, 256); unique names avoid duplicate-layer-name clashes

    m = Conv2D(filters=64, strides=(1, 1), kernel_size=(1, 1), activation='relu', padding='same')(
        m)  # (None, 56, 56, 64)
    m = Conv2D(filters=16, strides=(1, 1), kernel_size=(1, 1), activation='relu', padding='same')(
        m)  # (None, 56, 56, 16)
    mask = Conv2D(filters=1, strides=(1, 1), kernel_size=(1, 1), activation='sigmoid', padding='same', name="mask")(
        m)  # (None, 56, 56, 1)

    global_step = tf.Variable(0, trainable=False, name='global_step')
    loss = tf.reduce_mean(tf.keras.losses.mean_squared_error(gt_mask, mask))  # (None, 56, 56, 1)  vs  (None, 56, 56, 1)
    train_step = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss, global_step=global_step)

    sess = tf.Session()
    k.set_session(sess)  # connect the Keras backend to this session

    with sess.as_default():

        # option 1
        # init = tf.global_variables_initializer()
        # sess.run(init)
        # init = tf.local_variables_initializer()
        # sess.run(init)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=20)
        ckpt = tf.train.get_checkpoint_state('dataset/checkpoint/checkpoint3')
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            saver.restore(sess, ckpt.model_checkpoint_path)  # 1. resume training by simply extending the epochs
            # saver.restore(sess, "./checkpoint/nn.ckpt-3500")              # 2. resume from a specific epoch chosen after observing overfitting
            # run only one of options 1 and 2
            sess.run(tf.local_variables_initializer())  # needed because of string_input_producer's epoch counter...

        else:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = []
        # print (tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS))
        for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
            threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                             start=True))

        tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in range(10000):
            sess.run([train_step])  # no feed_dict needed: the input pipeline feeds the data
            if step % 500 == 0:
                loss_val = sess.run(loss)
                print('Step: {:4d} | Loss: {:.5f}'.format(step, loss_val))
                checkpoint_path = os.path.join('dataset/checkpoint/checkpoint3', 'nn.ckpt')
                saver.save(sess, checkpoint_path, global_step=global_step)
            if coord.should_stop():
                coord.request_stop()
                coord.join(threads)
Example 16
    def create_transfer_cnn(self, ref_model=None, fcn_weights=None):
        '''Creates the ResNet transfer model; deserialized weights can be loaded by passing them in.'''

        if not ref_model:
            model = ResNet50(
                weights='imagenet',
                include_top=False,
                input_shape=(self.envs[0].observation_space.shape))
            for layer in model.layers:
                layer.trainable = False

            pretrained_weights = model.get_weights()

            flattened = Flatten()(model.output)
            #Add FCN
            for layer in range(self.num_layers):

                try:
                    nodes = self.nodes_per_layer[layer]
                except IndexError:
                    nodes = None

                if nodes is None:
                    nodes = self.default_nodes

                if layer == 0:
                    add_layer = Dense(units=nodes,
                                      activation='relu')(flattened)
                else:
                    add_layer = Dense(units=nodes,
                                      activation='relu')(add_layer)

            if self.num_layers:
                output = Dense(units=self.num_outputs,
                               activation=self.activation)(add_layer)
            else:
                output = Dense(units=self.num_outputs,
                               activation=self.activation)(flattened)

            model = Model(model.inputs, output)
            model.compile(Adam(lr=1e-3), 'mse', metrics=['acc'])
        else:
            model = ref_model

            if fcn_weights:
                assert len(fcn_weights) == len(self.weight_shapes), \
                  f'Invalid Weight Structure. Expected {len(self.weight_shapes)}, got {len(fcn_weights)}.'
                all_weights = model.get_weights()
                untrainable = all_weights[:-len(self.weight_shapes)]
                weights = all_weights[-len(self.weight_shapes):]
                # print('Deserialized weights length:', len(weights))
                for i, matrix in enumerate(weights):
                    # print('Original', matrix)
                    matrix[:] = fcn_weights[i]
                    # print('Result', matrix)

                model.set_weights(untrainable + weights)

        #create deserialize dependencies
        if self.weight_shapes is None:
            model.summary()
            self.weight_shapes = []
            self.weights_lengths = []

            weights = model.get_weights()
            self.full_weights_length = len(weights)
            self.pretrained_weights_length = len(pretrained_weights)
            for i in range(len(pretrained_weights), len(weights)):
                self.weight_shapes.append(weights[i].shape)

                #generate indices of weights to recreate the weight structure from the gene string
                length = len(weights[i].reshape(1, -1)[0].tolist())
                if not self.weights_lengths:
                    self.weights_lengths.append(length)
                else:
                    self.weights_lengths.append(
                        self.weights_lengths[len(self.weights_lengths) - 1] +
                        length)
            if self.mxrt == 'default':
                self.mxrt = math.log(self.weights_lengths[-1],
                                     10) / (self.weights_lengths[-1])
            print('Weight Shapes:', self.weight_shapes)
            print('Weight Lengths:', self.weights_lengths)
            print('Mutation Rate:', self.mxrt)
            print('Crossover Type:', self.cxtype)
            print('Selection Type:', self.selection_type)
            print('Sharpness:', self.sharpness)

        return model
Example 17
# initialize the validation/testing data augmentation object (which
# we'll be adding mean subtraction to)
valAug = ImageDataGenerator()

# define the ImageNet mean subtraction (in RGB order) and set the
# mean subtraction value for each of the data augmentation
# objects
mean = np.array([123.68, 116.779, 103.939], dtype="float32")
trainAug.mean = mean
valAug.mean = mean

# load the ResNet-50 network, ensuring the head FC layer sets are left
# off
baseModel = ResNet50(weights="imagenet",
                     include_top=False,
                     input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(lb.classes_), activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
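The excerpt ends before training; in this pattern the usual next step (assumed, not shown in the original) is to freeze the base and compile:

from tensorflow.keras.optimizers import SGD

# freeze the convolutional base so only the new head trains
for layer in baseModel.layers:
    layer.trainable = False

opt = SGD(lr=1e-4, momentum=0.9)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])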
Example 18
def multi_quality(res=None,
                  env=None,
                  layers=1,
                  shapes=(1, ),
                  lengths=(1, ),
                  inputs=None,
                  outputs=1,
                  genes=None,
                  index=None,
                  sharpness=1,
                  activation='linear',
                  nodes_per_layer=[128],
                  transfer=False):
    '''
    implements multiprocessed nn evaluation on a gym environment
    res: results are indexed into res at `index`
    '''
    try:
        genes = [val for val in genes]
        print(f'Testing model {index}')
        if not transfer:
            model = Sequential()
            model.add(Dense(inputs, input_shape=(inputs, )))

            for layer in range(layers):

                try:
                    nodes = nodes_per_layer[layer]
                except IndexError:
                    nodes = None

                if nodes is None:
                    nodes = 128

                model.add(Dense(units=nodes, activation='relu'))

            #output layer
            model.add(Dense(units=outputs, activation=activation))
            model.compile(optimizer=Adam(lr=0.001),
                          loss='mse',
                          metrics=['accuracy'])
        elif transfer:
            model = ResNet50(weights='imagenet',
                             include_top=False,
                             input_shape=(env.observation_space.shape))
            for layer in model.layers:
                layer.trainable = False

            flattened = Flatten()(model.output)
            #Add FCN
            for layer in range(layers):

                try:
                    nodes = nodes_per_layer[layer]
                except IndexError:
                    nodes = None

                if nodes is None:
                    nodes = 128

                if layer == 0:
                    add_layer = Dense(units=nodes,
                                      activation='relu')(flattened)
                else:
                    add_layer = Dense(units=nodes,
                                      activation='relu')(add_layer)

            if layers:
                output = Dense(units=outputs, activation=activation)(add_layer)
            else:
                output = Dense(units=outputs, activation=activation)(flattened)

            model = Model(model.inputs, output)
            model.compile(Adam(lr=1e-3), 'mse', metrics=['acc'])

        if transfer:
            fcn_weights = deserialize(genes, shapes, lengths)
            assert len(fcn_weights) == len(shapes), \
              f'Invalid Weight Structure. Expected {len(shapes)}, got {len(fcn_weights)}.'
            all_weights = model.get_weights()
            untrainable = all_weights[:-len(shapes)]
            weights = all_weights[-len(shapes):]
            for i, matrix in enumerate(weights):
                matrix[:] = fcn_weights[i]

            model.set_weights(untrainable + weights)
        else:
            weights = deserialize(genes, shapes, lengths)
            model.set_weights(weights)

        # print('index', index)
        # print('genes', genes)
        # print('weights', weights, '\n\n\n')

        total_rewards = []
        for epoch in range(sharpness):
            done = False
            rewards = []
            envstate = env.reset()
            while not done:
                #adj envstate
                if transfer:
                    envstate = np.expand_dims(envstate, axis=0)
                else:
                    envstate = envstate.reshape(1, -1)

                qvals = model.predict(envstate)[0]
                if outputs == 1:
                    action = qvals  #continuous action space
                else:
                    action = np.argmax(qvals)  #discrete action space

                envstate, reward, done, info = env.step(action)
                rewards.append(reward)

            total_rewards.append(sum(rewards))  # total episode reward; appending only the final step's reward was likely a bug

        # if 5 >= sharpness >= 1:
        #   result = max(total_rewards)
        # else:
        result = sum(total_rewards) / len(total_rewards)
    except Exception as e:
        print('Exception occurred in process!', e)
        result = -1000000
    print(f'Model {index} Results: {result}')
    res[index] = result

    # spontaneous saving
    if result > .79:
        print(f'Saving model {index}...')
        model.save_weights(
            f'./results/cornGA_{str(round(result, 2)*100)[:-2]}.h5')
        print('Model saved')
    return result
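A sketch of how multi_quality might be dispatched across processes; make_env, population, shapes, and lengths are placeholders for the caller's own objects:

import multiprocessing as mp

manager = mp.Manager()
res = manager.dict()  # shared results dict, indexed by model number
jobs = [mp.Process(target=multi_quality,
                   kwargs=dict(res=res, env=make_env(), genes=genes,
                               index=i, shapes=shapes, lengths=lengths,
                               inputs=4, outputs=2))
        for i, genes in enumerate(population)]
for job in jobs:
    job.start()
for job in jobs:
    job.join()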
Example 19
def make_base_model(img_shape, n_output_neurons, optimizer, base_model_name,
                    model_counter):
    '''Create a DNN to perform a task
        img_shape: (int,int,int), (Height, Width, Channels) of input images
        n_output_neurons: int number of classes in task
        optimizer: normal keras optimizer
        base_model_name: str name to give the net
        model_counter: int assign an "identification number" to your model name
        '''

    print(base_model_name.lower())
    if (base_model_name.lower() == 'resnet50') or (base_model_name.lower()
                                                   == 'resnet50_hb'):
        from tensorflow.keras.applications import ResNet50
        from main import batch_size
        input_layer = tf.keras.layers.Input(batch_shape=(batch_size, ) +
                                            img_shape)
        model = ResNet50(weights=None,
                         input_tensor=input_layer,
                         classes=n_output_neurons)
    elif base_model_name.lower() in ('vgg16', 'vgg_16_hb'):
        try:
            from tensorflow.keras.applications import VGG16
            from main import batch_size
            input_layer = tf.keras.layers.Input(batch_shape=(batch_size, ) +
                                                img_shape)
            model = VGG16(weights=None,
                          input_tensor=input_layer,
                          classes=n_output_neurons)
        except Exception:
            print(
                'VGGnet requires 224x224x3 images. Please choose another network'
            )
            raise
    else:
        model = tf.keras.Sequential([
            tf.keras.layers.InputLayer(input_shape=img_shape),
            tf.keras.layers.Conv2D(filters=25,
                                   kernel_size=(9, 9),
                                   padding='same',
                                   activation='relu'),
            tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
            tf.keras.layers.Conv2D(filters=50,
                                   kernel_size=(5, 5),
                                   padding='same',
                                   activation='relu'),
            tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
            tf.keras.layers.Conv2D(filters=50,
                                   kernel_size=(5, 5),
                                   padding='same',
                                   activation='relu'),
            tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
            tf.keras.layers.Conv2D(filters=100,
                                   kernel_size=(3, 3),
                                   padding='same',
                                   activation='relu'),
            tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(n_output_neurons, activation='softmax')
        ])

    # needed for saving models
    checkpoint_dict = {}
    base_model_name = str(model_counter) + '_' + base_model_name
    checkpoint_dict[
        'checkpoint_path'] = './model_checkpoints/' + base_model_name + '_ckpt'
    checkpoint_dict['checkpoint'] = tf.train.Checkpoint(step=tf.Variable(0),
                                                        optimizer=optimizer,
                                                        net=model)
    checkpoint_dict['saving_manager'] = tf.train.CheckpointManager(
        checkpoint_dict['checkpoint'],
        checkpoint_dict['checkpoint_path'],
        max_to_keep=1)

    # Print network summary and check which layers are trainable
    model.summary()
    for layer in model.layers:
        print('{}: layer {} has trainable = {}.'.format(
            base_model_name, layer.name, layer.trainable))

    return model, checkpoint_dict
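A minimal usage sketch (optimizer choice and names are assumptions); using the fallback branch avoids the `from main import batch_size` dependency:

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(1e-3)
model, checkpoint_dict = make_base_model(img_shape=(64, 64, 3),
                                         n_output_neurons=10,
                                         optimizer=optimizer,
                                         base_model_name='simple_cnn',
                                         model_counter=0)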
Example 20
print(train_generator, validation_generator)
NUM_EPOCHS = 5
INIT_LR = 1e-4
"""class LossAndErrorPrintingCallback(tf.keras.callbacks.Callback):
    
    def on_epoch_end(self, epoch, logs=None):
      print('The average loss for epoch {} is {:7.2f} and the accuracy is {:7.2f}.'.format(epoch, logs['loss'], logs['accuracy']))
      print('The average validation loss for epoch {} is {:7.2f} and the validation accuracy is {:7.2f}.'.format(epoch, logs['val_loss'], logs['val_accuracy']))

"""
# In[6]:

opt1 = tf.keras.optimizers.SGD(lr=INIT_LR, momentum=0.9, nesterov=True)

model = ResNet50()

model.summary()

quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope
if numBits == 8:
    with quantize_scope({
            'Conv2DQuantizeConfig': quant8.Conv2DQuantizeConfig,
            'ActivationQuantizeConfig': quant8.ActivationQuantizeConfig,
            'DenseQuantizeConfig': quant8.DenseQuantizeConfig  # was ActivationQuantizeConfig, most likely a copy-paste slip
    }):

        def apply_quantization(layer):
            if isinstance(layer, tf.keras.layers.Conv2D):
Example 21
import tensorflow as tf

# tf.config.set_per_process_memory_fraction(0.75)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# resnet_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

NUM_CLASSES = len(os.listdir("nmlo-contest-4/train/train/"))

print("Number of classes:", NUM_CLASSES)

# Still not talking about our train/test data or any pre-processing.
model = Sequential()

resnet_model = ResNet50(include_top=False,
                        pooling='avg',
                        weights='imagenet',
                        input_shape=(244, 244, 3))
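# note: the 244x244 input above is likely a typo for the canonical 224x224,
# though include_top=False accepts either size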

model.add(resnet_model)
# model.add(Dense(128, activation='relu'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax'))
# model.layers[0].trainable = False

for layer in resnet_model.layers:
    if isinstance(layer, BatchNormalization):
        layer.trainable = True
    else:
        layer.trainable = False
Example 22

from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

#1. Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          3).astype('float32') / 255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                        3).astype('float32') / 255.

#2. Model
t = ResNet50(weights='imagenet',
             include_top=False,
             input_shape=(x_train.shape[1], x_train.shape[2], 3))
t.trainable = False  # freeze: use the ImageNet weights as-is, without training them
# model.trainable=True

model = Sequential()
model.add(t)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',

Example 23

    "--visualize",
    type=int,
    default=-1,
    help="whether or not to show extra visualizations for debugging")
args = vars(ap.parse_args())

# initialize variables used for the object detection procedure
WIDTH = 600
PYR_SCALE = 1.5
WIN_STEP = 16
ROI_SIZE = eval(args["size"])
INPUT_SIZE = (224, 224)

# load our the network weights from disk
print("[INFO] loading network...")
model = ResNet50(weights="imagenet", include_top=True)

# load the input image from disk, resize it such that it has the
# has the supplied width, and then grab its dimensions
orig = cv2.imread(args["image"])
orig = imutils.resize(orig, width=WIDTH)
(H, W) = orig.shape[:2]

# initialize the image pyramid
pyramid = image_pyramid(orig, scale=PYR_SCALE, minSize=ROI_SIZE)

# initialize two lists, one to hold the ROIs generated from the image
# pyramid and sliding window, and another list used to store the
# (x, y)-coordinates of where the ROI was in the original image
rois = []
locs = []
Example 24

conv_base.trainable = True

model = models.Sequential()
model.add(conv_base)

model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(4, activation='softmax'))
model.summary()

## resnet model
from tensorflow.keras.applications import ResNet50

conv_base = ResNet50(weights='imagenet',
                     include_top=False,
                     input_shape=(150, 150, 3))

conv_base.trainable = True
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(4, activation='softmax'))
model.summary()

##
model.compile(
    loss=
    'categorical_crossentropy',  # for multiclass use categorical_crossentropy
Example 25

# vgg16.summary()
print("Xception",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet101()
# vgg16.summary()
print("ResNet101",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet101V2()
# vgg16.summary()
print("ResNet101V2",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet152()
# vgg16.summary()
print("ResNet152",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet50()
# vgg16.summary()
print("ResNet50",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet50V2()
# vgg16.summary()
print("ResNet50V2",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = NASNetLarge()
# vgg16.summary()
print("NASNetLarge",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = NASNetMobile()
# vgg16.summary()
Example 26
                required=True,
                help="path to input directory of images")
ap.add_argument("-c",
                "--csv",
                required=True,
                help="path to csv file containing labels and bounding boxes")
args = vars(ap.parse_args())

#Path to images
imagePath = list(paths.list_images(args["dataset"]))

#Path to store the HDF5 File
HDF5file = "results/ResNetValOutput.hdf5"

#Load ResNet model (without the FC layer head)
resnet = ResNet50(weights="imagenet", include_top=False)

#validation path
val_Path = os.path.join(args['csv'], "Validation_Labels_and_Boxes.csv")

#dict and list to store validation results
val_dict = {}
val_detections = []

#Read validation labels to store for testing
with open(val_Path, mode='r') as file:
    csv_reader = csv.DictReader(file)
    data = list(csv_reader)

for row in data:
    for k, v in row.items():
Example 27
def extract(dataset=None,
            concept_csv_paths=None,
            reduce_by=0,
            model=None,
            output_layer=-1,
            text_extract=None,
            text_model=None,
            output_layer_text=-1):
    concept_data = data_from_concept_csvs(concept_csv_paths)
    concept_list = get_list_of_concepts(concept_data)
    if model in [
            'VGG16', 'VGG19', 'RESNET50', 'INCEPTION', 'DENSE121', 'XCEPTION'
    ] and (text_extract != 'only' or text_extract == None):
        if model == 'VGG19':
            model = VGG19(weights='imagenet')
            model = Model(inputs=model.inputs,
                          outputs=model.layers[output_layer].output)
        elif model == 'VGG16':
            model = VGG16(weights='imagenet')
            model = Model(inputs=model.inputs,
                          outputs=model.layers[output_layer].output)
        elif model == 'RESNET50':
            model = ResNet50(weights='imagenet')
            model = Model(inputs=model.inputs,
                          outputs=model.layers[output_layer].output)
        elif model == 'DENSE121':
            model = DenseNet121(weights='imagenet')
            model = Model(inputs=model.inputs,
                          outputs=model.layers[output_layer].output)
        elif model == 'INCEPTION':
            model = InceptionV3(weights='imagenet')
            model = Model(inputs=model.inputs,
                          outputs=model.layers[output_layer].output)
        elif model == 'XCEPTION':
            model = Xception(weights='imagenet')  # was InceptionV3, a copy-paste bug
            model = Model(inputs=model.inputs,
                          outputs=model.layers[output_layer].output)
        print(model.summary())

    elif model not in [
            'VGG16', 'VGG19', 'RESNET50', 'INCEPTION', 'DENSE121', 'XCEPTION'
    ] and (text_extract != 'only' or text_extract == None):
        model = load_model(model)
        model = Model(inputs=model.inputs,
                      outputs=model.layers[output_layer].output)
        print(model.summary())

    if text_extract == 'only' or text_extract == 'with':
        model2 = load_model(text_model)
        model2 = Model(inputs=model2.inputs,
                       outputs=model2.get_layer(output_layer_text).output)
        print(model2.summary())

    tokenizer = create_tokenizer(concept_list)
    vocab_size = len(tokenizer.word_index) + 1

    PATHS = [path for path in glob(dataset + '/*/*/*')]
    random.shuffle(PATHS)

    if reduce_by is not None:
        start = int(len(PATHS) * reduce_by)
        PATHS = PATHS[start:]

    PATHS = tqdm(PATHS)
    PATHS.set_description("Extracting Features")

    data_list = list()

    for path in PATHS:

        data_object = dict()

        path_split = os.path.split(path)
        image_class = os.path.split(path_split[-2])[-1]
        image_name = path_split[-1]
        concepts = concept_data[image_name]

        data_object['filepath'] = path
        data_object['image_class'] = image_class
        data_object['concepts'] = concepts

        if path is None:
            raise ValueError('path is None')  # raising a plain string is invalid in Python 3

        if text_extract != 'only':
            data_object['image_feature'] = image_feature(path, model)

        if text_extract in ('only', 'with'):
            data_object['concepts_feature'] = text_feature(
                model2, vocab_size, concepts, tokenizer)

        data_list.append(data_object)

    return [data_list, concept_list, tokenizer, vocab_size]
Example 28
mc = ModelCheckpoint('C:/LPD_competition/lotte_projcet2.h5',
                     save_best_only=True,
                     verbose=1)

train_generator = idg.flow(x_train, y_train, batch_size=32)
# seed => random_state
valid_generator = idg2.flow(x_valid, y_valid)
test_generator = x_pred

from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation
from tensorflow.keras.applications import VGG19, MobileNet, ResNet50

efficientnetb7 = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=x_train.shape[1:])
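# note: despite its name, 'efficientnetb7' above holds a ResNet50 backbone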
efficientnetb7.trainable = True

a = efficientnetb7.output
a = GlobalAveragePooling2D()(a)
a = Flatten()(a)
a = Dense(128)(a)
a = BatchNormalization()(a)
a = Activation('relu')(a)
a = Dense(64)(a)
a = BatchNormalization()(a)
a = Activation('relu')(a)
a = Dense(1000, activation='softmax')(a)

model = Model(inputs=efficientnetb7.input, outputs=a)
Example 29
def _load_model():
    # Initialize the model
    model = ResNet50(weights='imagenet')
    print("Load model complete!")
    return model
Example 30

# list of models that are loaded only once
# models shared across the code
from tensorflow.keras.applications import ResNet152V2, ResNet50
import time
import logging
start = time.time()
RESNET152V2 = ResNet152V2(
    weights='imagenet',
    include_top=False,
    input_shape=(224, 224, 3)
)
RESNET152V2.trainable=False

RESNET50 = ResNet50(
    weights='imagenet',
    include_top=False,
    input_shape=(224, 224, 3)
)
RESNET50.trainable=False

print('* RESNET init model time :', time.time() - start)

# criterion separating success from failure:
# above PREDICTION_RATE counts as success, below as failure
PREDICTION_RATE = 0.50

# train/test split ratio
SPLIT_RATIO = 0.2

# width, height, and channels used when loading images
W = 224