Code example #1
File: tal.py  Project: talregev/gym-gazebo
    def createModel(self):
        # Network structure must be directly changed here.
        base_model = VGG16(backend=backend,
                           layers=layers,
                           models=models,
                           utils=utils,
                           weights='imagenet',
                           include_top=False,
                           input_shape=(self.img_rows, self.img_cols,
                                        self.img_channels))

        # freeze the pre-trained convolutional base
        for layer in base_model.layers:
            layer.trainable = False

        x = base_model.output

        # flatten the conv features before the dense layers
        x = Flatten()(x)
        # 1st dense layer
        x = Dense(4096, activation='relu', name='fc1')(x)
        # 2nd dense layer
        x = Dense(4096, activation='relu', name='fc2')(x)

        # output layer: one linear unit per output (e.g. Q-values)
        x = Dense(network_outputs)(x)

        model = Model(base_model.input, x)

        #adam = Adam(lr=self.learningRate)
        #model.compile(loss='mse', optimizer=adam)
        model.compile(RMSprop(lr=self.learningRate), 'mse')
        model.summary()

        return model
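
A hedged usage sketch of the returned network in a DQN-style agent (the batch size and the `agent` instance are assumptions for illustration, not taken from the project):

import numpy as np

model = agent.createModel()  # 'agent' is a hypothetical instance of this class
states = np.random.rand(4, agent.img_rows, agent.img_cols,
                        agent.img_channels).astype('float32')
q_values = model.predict(states)        # shape: (4, network_outputs)
best_actions = q_values.argmax(axis=1)  # greedy action per state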
Code example #2
class CarClassifier:
    pre_model = VGG16(weights=None,
                      include_top=False,
                      input_shape=(224, 224, 3),
                      backend=keras.backend,
                      layers=keras.layers,
                      models=keras.models,
                      utils=keras.utils)
    pre_model.trainable = False
    pre_model.summary()

    # replace VGG16's top: Flatten -> Dense(4096) -> Dense(1024) -> Dropout -> Dense(20)
    vgg_model = models.Sequential()
    vgg_model.add(pre_model)
    vgg_model.add(layers.Flatten())
    vgg_model.add(layers.Dense(4096, activation='relu'))
    vgg_model.add(layers.Dense(1024, activation='relu'))
    vgg_model.add(layers.Dropout(0.5))
    vgg_model.add(layers.Dense(20, activation='softmax'))  # 20 output classes

    vgg_model.summary()

    vgg_model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.RMSprop(lr=2e-5),
                      metrics=['acc'])

    # predict_classes needs input data; `input_batch` is a placeholder for a
    # batch of preprocessed (224, 224, 3) images
    output_label = vgg_model.predict_classes(input_batch)
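
Note that `Sequential.predict_classes` was removed in later Keras releases; an equivalent that works across versions (with the same placeholder batch):

import numpy as np
output_label = np.argmax(vgg_model.predict(input_batch), axis=1)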
Code example #3
    def __init__(self, network_arch='vgg'):
        self.network_arch = network_arch

        if network_arch == 'vgg':
            self.model = VGG16(weights='imagenet', include_top=False)
        else:
            raise ValueError("Unsupported network_arch; only 'vgg' is defined")
        self.model.summary()
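
A hedged usage sketch (the wrapper class name `FeatureExtractor` and the image file are assumptions): extract convolutional features from a preprocessed image.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import preprocess_input

extractor = FeatureExtractor(network_arch='vgg')  # hypothetical class name
img = img_to_array(load_img('example.jpg', target_size=(224, 224)))
batch = preprocess_input(np.expand_dims(img, axis=0))
features = extractor.model.predict(batch)  # shape (1, 7, 7, 512)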
Code example #4
    def trenuj(self, x_train, x_test, y_train, y_test, il_etykiet):
        self.il_etykiet = il_etykiet

        # fetch the parameters of the current network configuration
        il_neuronow = self.wybrane_parametry['neurony']
        sposob_aktywacji = self.wybrane_parametry['aktywacja']
        il_warstw = self.wybrane_parametry['il_warstw']

        wejscie = Input(shape=(224, 224, 3))

        # build a model based on VGG16
        model = VGG16(input_tensor=wejscie,
                      include_top=False,
                      weights='imagenet')
        x = model.get_layer('block5_pool').output
        x = Flatten(name='flatten')(x)

        # add the hidden layers
        for i in range(il_warstw):
            x = Dense(il_neuronow, activation=sposob_aktywacji)(x)

        # output layer
        wyjscie = Dense(self.il_etykiet, activation='softmax')(x)
        nowy_model_vgg = Model(wejscie, wyjscie)

        # freeze all layers except our newly added ones, so the original
        # model is not retrained
        for warstwa in nowy_model_vgg.layers[:-(il_warstw + 1)]:
            warstwa.trainable = False

        nowy_model_vgg.compile(loss='categorical_crossentropy',
                               optimizer='rmsprop',
                               metrics=['accuracy'])

        # fit() returns a History object with the training curves
        histogram = nowy_model_vgg.fit(x_train,
                                       y_train,
                                       batch_size=self.batch_size,
                                       epochs=self.epochs,
                                       verbose=self.verbose,
                                       validation_data=(x_test, y_test))

        # evaluation
        (loss, accuracy) = nowy_model_vgg.evaluate(x_test,
                                                   y_test,
                                                   batch_size=self.batch_size,
                                                   verbose=self.verbose)
        y_pred = nowy_model_vgg.predict(x_test, batch_size=self.batch_size)

        self.confusion_matrixx = confusion_matrix(y_test.argmax(axis=-1),
                                                  y_pred.argmax(axis=-1))
        self.histogram = histogram
        self.strata = loss
        self.celnosc = accuracy
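
A hedged usage sketch (the class name `Trener`, parameter values, and data arrays are assumptions for illustration): the labels must be one-hot encoded, since the evaluation above takes argmax(axis=-1).

from keras.utils import to_categorical

trainer = Trener()  # hypothetical wrapper class exposing trenuj()
trainer.wybrane_parametry = {'neurony': 256, 'aktywacja': 'relu', 'il_warstw': 2}
trainer.batch_size, trainer.epochs, trainer.verbose = 32, 5, 1

y_train_oh = to_categorical(y_train_raw, num_classes=5)  # placeholder labels
y_test_oh = to_categorical(y_test_raw, num_classes=5)
trainer.trenuj(x_train, x_test, y_train_oh, y_test_oh, il_etykiet=5)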
Code example #5
def main():
    im_root = tb.read_config('config.json', 'UCMerced_LandUse_directory')

    # Choose the VGG depth for the feature extractor
    if VGG_LAYER == 16:
        extractor = VGG16(weights='imagenet', include_top=False)
    elif VGG_LAYER == 19:
        extractor = VGG19(weights='imagenet', include_top=False)
    else:
        raise ValueError('VGG_LAYER must be 16 or 19')

    # Get image data for the first subsample (one image per class, 21 classes)
    # 'IMG' is a folder containing the image data
    data1 = getImageData('IMG/', 0, 21)
    X = extractor.predict(data1, verbose=0)

    # iterate over the remaining 99 batches (100 batches x 21 images = 2100 total)
    for i in range(1, 100):
        data2 = getImageData('IMG/', i * 21, i * 21 + 21)
        X2 = extractor.predict(data2, verbose=0)
        X = np.concatenate((X, X2), axis=0)
        print(X.shape)  # progress check (optional)

    # save the feature array to a file for the downstream neural network
    np.save('Input_NN_VGG' + str(VGG_LAYER) + '.npy', X)
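
The saved features can later be loaded as input for the downstream network (a hedged sketch):

import numpy as np
X = np.load('Input_NN_VGG16.npy')  # assuming VGG_LAYER == 16
print(X.shape)                     # VGG feature maps for all 2100 images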
Code example #6
def load_model():
    global network_model
    network_model = VGG16(weights="imagenet")
    # capture the default graph so worker threads can reuse it (TensorFlow 1.x)
    global graph
    graph = tf.get_default_graph()
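
This is the usual pattern for serving a Keras model from a multi-threaded server under TensorFlow 1.x; request handlers then run inference inside the captured graph (a hedged sketch, the `predict` helper is not in the original):

def predict(image_batch):
    # run inference in the graph captured by load_model()
    with graph.as_default():
        return network_model.predict(image_batch)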
Code example #7
    def build(self, use_cpu=False, print_summary=False):
        vgg16 = VGG16(weights="imagenet",
                      include_top=False,
                      input_shape=(224, 224, 3))

        inputs = Input(shape=(224, 224, 3))

        conv_block_1 = self.buildConv2DBlock(inputs, 64, 1, 2)
        pool1, pool1_argmax = MaxPoolingWithArgmax2D()(conv_block_1)

        conv_block_2 = self.buildConv2DBlock(pool1, 128, 2, 2)
        pool2, pool2_argmax = MaxPoolingWithArgmax2D()(conv_block_2)

        conv_block_3 = self.buildConv2DBlock(pool2, 256, 3, 3)
        pool3, pool3_argmax = MaxPoolingWithArgmax2D()(conv_block_3)

        conv_block_4 = self.buildConv2DBlock(pool3, 512, 4, 3)
        pool4, pool4_argmax = MaxPoolingWithArgmax2D()(conv_block_4)

        conv_block_5 = self.buildConv2DBlock(pool4, 512, 5, 3)
        pool5, pool5_argmax = MaxPoolingWithArgmax2D()(conv_block_5)

        fc6 = Conv2D(512, 7, use_bias=False, padding='valid',
                     name='fc6')(pool5)  # 512 units (VGG's original fc6 has 4096)
        fc6 = BatchNormalization(name='batchnorm_fc6')(fc6)
        fc6 = Activation('relu', name='relu_fc6')(fc6)

        fc7 = Conv2D(512, 1, use_bias=False, padding='valid',
                     name='fc7')(fc6)  # 512 units (VGG's original fc7 has 4096)
        fc7 = BatchNormalization(name='batchnorm_fc7')(fc7)
        fc7 = Activation('relu', name='relu_fc7')(fc7)

        x = Conv2DTranspose(512,
                            7,
                            use_bias=False,
                            padding='valid',
                            name='deconv-fc6')(fc7)
        x = BatchNormalization(name='batchnorm_deconv-fc6')(x)
        x = Activation('relu', name='relu_deconv-fc6')(x)
        x = MaxUnpooling2D(name='unpool5')([x, pool5_argmax])
        x.set_shape(conv_block_5.get_shape())

        x = Conv2DTranspose(512,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv5-1')(x)
        x = BatchNormalization(name='batchnorm_deconv5-1')(x)
        x = Activation('relu', name='relu_deconv5-1')(x)

        x = Conv2DTranspose(512,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv5-2')(x)
        x = BatchNormalization(name='batchnorm_deconv5-2')(x)
        x = Activation('relu', name='relu_deconv5-2')(x)

        x = Conv2DTranspose(512,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv5-3')(x)
        x = BatchNormalization(name='batchnorm_deconv5-3')(x)
        x = Activation('relu', name='relu_deconv5-3')(x)

        x = MaxUnpooling2D(name='unpool4')([x, pool4_argmax])
        x.set_shape(conv_block_4.get_shape())

        x = Conv2DTranspose(512,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv4-1')(x)
        x = BatchNormalization(name='batchnorm_deconv4-1')(x)
        x = Activation('relu', name='relu_deconv4-1')(x)

        x = Conv2DTranspose(512,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv4-2')(x)
        x = BatchNormalization(name='batchnorm_deconv4-2')(x)
        x = Activation('relu', name='relu_deconv4-2')(x)

        x = Conv2DTranspose(256,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv4-3')(x)
        x = BatchNormalization(name='batchnorm_deconv4-3')(x)
        x = Activation('relu', name='relu_deconv4-3')(x)

        x = MaxUnpooling2D(name='unpool3')([x, pool3_argmax])
        x.set_shape(conv_block_3.get_shape())

        x = Conv2DTranspose(256,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv3-1')(x)
        x = BatchNormalization(name='batchnorm_deconv3-1')(x)
        x = Activation('relu', name='relu_deconv3-1')(x)

        x = Conv2DTranspose(256,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv3-2')(x)
        x = BatchNormalization(name='batchnorm_deconv3-2')(x)
        x = Activation('relu', name='relu_deconv3-2')(x)

        x = Conv2DTranspose(128,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv3-3')(x)
        x = BatchNormalization(name='batchnorm_deconv3-3')(x)
        x = Activation('relu', name='relu_deconv3-3')(x)

        x = MaxUnpooling2D(name='unpool2')([x, pool2_argmax])
        x.set_shape(conv_block_2.get_shape())

        x = Conv2DTranspose(128,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv2-1')(x)
        x = BatchNormalization(name='batchnorm_deconv2-1')(x)
        x = Activation('relu', name='relu_deconv2-1')(x)

        x = Conv2DTranspose(64,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv2-2')(x)
        x = BatchNormalization(name='batchnorm_deconv2-2')(x)
        x = Activation('relu', name='relu_deconv2-2')(x)

        x = MaxUnpooling2D(name='unpool1')([x, pool1_argmax])
        x.set_shape(conv_block_1.get_shape())

        x = Conv2DTranspose(64,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv1-1')(x)
        x = BatchNormalization(name='batchnorm_deconv1-1')(x)
        x = Activation('relu', name='relu_deconv1-1')(x)

        x = Conv2DTranspose(64,
                            3,
                            use_bias=False,
                            padding='same',
                            name='deconv1-2')(x)
        x = BatchNormalization(name='batchnorm_deconv1-2')(x)
        x = Activation('relu', name='relu_deconv1-2')(x)

        output = Conv2DTranspose(21,
                                 1,
                                 activation='softmax',
                                 padding='same',
                                 name='output')(x)

        model = Model(inputs=inputs, outputs=output)

        self.model = model

        if print_summary:
            print(self.model.summary())

        # buildConv2DBlock is assumed to name layers 'conv{block}-{depth}',
        # mirroring VGG16's 'block{block}_conv{depth}' layer names
        for layer in self.model.layers:
            if layer.name.startswith('conv'):
                block = layer.name[4:].split('-')[0]
                depth = layer.name[4:].split('-')[1]
                # copy only the VGG16 kernel: these layers are bias-free
                # (use_bias=False, BatchNorm follows each conv)
                layer.set_weights([
                    vgg16.get_layer('block{}_conv{}'.format(
                        block, depth)).get_weights()[0]
                ])

        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy', 'mse'])
Code example #8
            return False
    except Exception as e:
        print(e)
        return False

if __name__ == "__main__":
    baseline_01 = 0.55
    baseline_02 = 1.0
    writer = None
    frame_per_second = 30
    maxLost = 5  # maximum number of frames an object may be lost before the tracker drops it

    car_label_strlist = np.loadtxt("car_label2.csv", delimiter=',', dtype='str')

    # load VGG16
    pre_model = VGG16(weights='imagenet', include_top=False,
                      input_shape=(224, 224, 3), backend=keras.backend,
                      layers=keras.layers, models=keras.models,
                      utils=keras.utils)
    pre_model.trainable = True
    pre_model.summary()

    # replace VGG16's top: Flatten -> Dense(4096) -> Dense(1024) -> Dense(20)
    vgg_model = models.Sequential()
    vgg_model.add(pre_model)
    vgg_model.add(layers.Flatten())
    vgg_model.add(layers.Dense(4096, activation='relu'))
    vgg_model.add(layers.Dense(1024, activation='relu'))
    vgg_model.add(layers.Dense(20, activation='softmax'))  # 20 output classes

    vgg_model.summary()
    vgg_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=2e-5), metrics=['acc'])
    #vgg_model.load_weights("y_s_weights.h5") # 가중치 모델 1
    #vgg_model.load_weights("y_up_weights.h5") # 가중치 모델 2
Code example #9
# -*- coding: utf-8 -*-
"""vgg16_keras.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1kLOtfIlx8gpJamwqZSXZiJ67Tz7f1Pah
"""
import tensorflow

from keras_preprocessing.image import load_img, img_to_array

from keras.applications.vgg16 import preprocess_input, decode_predictions, VGG16

model = VGG16()

images = load_img("speaker.jpg", target_size=(224, 224))

img_array = img_to_array(images)

img_array  # bare expression: notebook cell display of the raw array

img_array = img_array.reshape(
    (1, img_array.shape[0], img_array.shape[1], img_array.shape[2]))

img_array.shape  # notebook display: (1, 224, 224, 3)

image = preprocess_input(img_array)

image  # notebook display of the preprocessed batch
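
The notebook stops after preprocessing; the usual next step with this API would be (hedged, as it is not in the original):

predictions = model.predict(image)
# top-3 ImageNet classes as (class_id, class_name, probability) tuples
print(decode_predictions(predictions, top=3)[0])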
Code example #10
def train(version, batch_size, epochs):
    img_rows = 512
    img_cols = 512
    channel = 3
    num_of_train_samples = 70
    num_of_test_samples = 19
    train_data_path = 'F:\\PARAM\\DL\\diaretdb1_v_1_1\\diaretdb1_v_1_1\\resources\\images\\train'
    test_data_path = 'F:\\PARAM\\DL\\diaretdb1_v_1_1\\diaretdb1_v_1_1\\resources\\images\\test'
    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        train_data_path,
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        test_data_path,
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False)  # keep file order so predictions align with .classes below
    if version == 0:
        model = VGG16(include_top=False,
                      weights='imagenet',
                      input_shape=(img_rows, img_cols, channel),
                      pooling='avg')
    else:
        model = VGG19(include_top=False,
                      weights='imagenet',
                      input_shape=(img_rows, img_cols, channel),
                      pooling='avg')

    for layer in model.layers:
        layer.trainable = False

    x = model.output
    predictions = Dense(2, activation='softmax')(x)
    model = Model(inputs=model.input, outputs=predictions)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit_generator(train_generator,
                        steps_per_epoch=num_of_train_samples // batch_size,
                        epochs=epochs,
                        verbose=1)
    model.save('VGG_' + str(version) + 'Exudate.h5')

    y_prediction = model.predict_generator(
        validation_generator, num_of_test_samples // batch_size + 1)
    y_pred = np.argmax(y_prediction, axis=1)
    print('Confusion Matrix')
    print(confusion_matrix(validation_generator.classes, y_pred))
    print('Classification Report')
    target_names = ['exudates', 'non_exudates']
    print(
        classification_report(validation_generator.classes,
                              y_pred,
                              target_names=target_names))
    cnf_matrix = confusion_matrix(validation_generator.classes, y_pred)
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=target_names)
    plt.show()
    return model
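
A hedged sketch of restoring the saved model later:

from keras.models import load_model
model = load_model('VGG_0Exudate.h5')  # version 0 = the VGG16 variant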
Code example #11
def Wls(video, start_time, location, latitude, longtitude):
    """"""
    savePath = video
    video = 'static/videos/' + video
    uploaded_time = 0.0

    baseline_01 = 0.55
    baseline_02 = 1.0
    writer = None
    frame_per_second = 30
    maxLost = 3  # maximum number of frames an object may be lost before the tracker drops it

    car_label_strlist = np.loadtxt("Module/car_label2.csv", delimiter=',', dtype='str')

    # load VGG16
    pre_model = VGG16(weights='imagenet', include_top=False,
                      input_shape=(224, 224, 3), backend=keras.backend,
                      layers=keras.layers, models=keras.models,
                      utils=keras.utils)
    pre_model.trainable = True
    pre_model.summary()

    # replace VGG16's top: Flatten -> Dense(4096) -> Dense(1024) -> Dense(20)
    vgg_model = models.Sequential()
    vgg_model.add(pre_model)
    vgg_model.add(layers.Flatten())
    vgg_model.add(layers.Dense(4096, activation='relu'))
    vgg_model.add(layers.Dense(1024, activation='relu'))
    vgg_model.add(layers.Dense(20, activation='softmax'))  # 20 output classes

    vgg_model.summary()
    vgg_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=2e-5), metrics=['acc'])
    #vgg_model.load_weights("y_s_weights.h5") # 가중치 모델 1
    #vgg_model.load_weights("y_up_weights.h5") # 가중치 모델 2
    vgg_model.load_weights("Module/CarDatabaseShare/y_seventh_car_weight.hdf5") #가중치 모델 3
    # Load Yolo
    net = cv.dnn.readNet("Module/CarDatabaseShare/yolov3.weights", "Module/CarDatabaseShare/yolov3.cfg")
    classes = []
    with open("Module/CarDatabaseShare/coco.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    #print(len(classes))
    colors = np.random.uniform(0, 255, size=(len(classes), 3))
    #print(colors)

    bbox_colors = np.random.randint(0, 255, size=(len(classes), 3))
    #print(bbox_colors)

    tracker = Tracker(maxLost=maxLost)
    cap = cv.VideoCapture(video)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")
        sys.exit()

    vwidth = cap.get(cv.CAP_PROP_FRAME_WIDTH)    # float
    vheight = cap.get(cv.CAP_PROP_FRAME_HEIGHT)  # float
    frame_count = 0
    while True:
        ret, frame = cap.read()

        if not ret:
            break

        height, width, channels = frame.shape

        # Detecting objects
        blob = cv.dnn.blobFromImage(frame, 0.00392, (320, 320), (0, 0, 0), True, crop=False)

        net.setInput(blob)
        outs = net.forward(output_layers)

        # Showing information on the screen
        class_ids = []
        confidences = []
        boxes = []
        detected_boxes = []
        detected_label = []
        detected_times = []
        detected_colors = []

        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)

                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)

                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        indexes = cv.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        # print(indexes)
        font = cv.FONT_HERSHEY_PLAIN

        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                if h > 40 and w > 50 and class_ids[i] == 2 and y > (vheight * baseline_01) and (y + h) < (vheight * baseline_02):
                    cropimg = frame[y:y+h, x:x+w].copy()
                    try:
                        colorlabel = process_image(cropimg)
                        cropimg = cv.cvtColor(cropimg, cv.COLOR_BGR2GRAY)
                        cropimg = cv.resize(cropimg, dsize=(224, 224), interpolation=cv.INTER_AREA)
                        cropimg = cv.GaussianBlur(cropimg, (3, 3), 0)
                        cannimg = cv.Canny(cropimg, 25, 50)
                        npimg = np.asarray(cannimg)
                        npimg = np.stack((cannimg,)*3, axis=-1)
                        npimg = np.expand_dims(npimg, axis=0)
                        #print(npimg.shape)
                        car_recog = CarClassifier(npimg, vgg_model, car_label_strlist)
                        detected_boxes.append((x, y, x + w, y + h))
                        detected_label.append(car_recog)
                        detected_times.append(int(frame_count/frame_per_second) + 1)
                        detected_colors.append(colorlabel)
                    except Exception:
                        continue
                    color = colors[i]
                    cv.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                    #cv.putText(frame, car_recog, (x, y + 30), font, 2, color, 3)
                    #cv.putText(frame, label, (x, y + 20), font, 3, color, 3)
                elif (class_ids[i] == 3 or class_ids[i] == 5 or class_ids[i] == 7) and y > (vheight * baseline_01) and (y + h) < (vheight * baseline_02):
                    cropimg = frame[y:y+h, x:x+w].copy()
                    colorlabel = process_image(cropimg)
                    label = str(classes[class_ids[i]])
                    color = colors[i]
                    cv.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                    #cv.putText(frame, label, (x, y + 20), font, 2, color, 3)
                    detected_boxes.append((x, y, x + w, y + h))
                    detected_label.append(label)
                    detected_times.append(int(frame_count/frame_per_second) + 1)
                    detected_colors.append(colorlabel)

        cv.line(frame, (0, int(vheight * baseline_01)),
                (int(vwidth - 1), int(vheight * baseline_01)), (0, 0, 0), 2)
        cv.line(frame, (0, int(vheight * baseline_02)),
                (int(vwidth - 1), int(vheight * baseline_02)), (0, 0, 0), 2)
        objects = tracker.update(detected_boxes, detected_label, detected_times, detected_colors)  # update tracker based on the newly detected objects

        for (objectID, centroid) in objects.items():
            text = "ID {}".format(objectID)
            if centroid[1] > (vheight * baseline_01) and centroid[1] < (vheight * baseline_02):
                cv.putText(frame, text, (centroid[0] - 10, centroid[1] - 10), cv.FONT_HERSHEY_SIMPLEX,
                           0.5, (0, 255, 0), 2)
                cv.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # cv.imshow('', frame)
        key = cv.waitKey(1)
        if key == 27:
            break
        if writer is None:
            startTime = start_time.replace(" ", "-")
            startTime = startTime.replace(":", "-")
            savepath = savePath.split(".")[0]
            filePath = "static/output/" + savepath + "-" + startTime + ".mp4"
            print(filePath)
            fourcc = cv.VideoWriter_fourcc(*"h264")
            # fourcc = cv.VideoWriter_fourcc(*"mp4v")
            writer = cv.VideoWriter(filePath, fourcc, 30, (int(vwidth), int(vheight)), True)
        writer.write(frame)
        if frame_count % 50 == 0:
            classified_info_list = tracker.getClassifiedInfo()
            for obj in classified_info_list:
                print(obj)

        frame_count += 1


    cctv = Cctv(video_link=filePath, location=location, start_time=start_time, latitude=latitude, longtitude=longtitude)
    cctv.save()

    classified_info_list = tracker.getClassifiedInfo()
    for obj in classified_info_list:
        print(obj)

# TODO: show the car model name, add seconds to appearance_time,
#       and ideally include the brand as well
# TODO: set the CCTV video_link properly // done
    for obj in classified_info_list:
        try:
            model_brand = obj[1][0][0].split("-")
            model = model_brand[0]
            brand = model_brand[1]
        except Exception:
            model = obj[1][0][0]
            brand = obj[1][0][0]
        try:
            car = Car.objects.get(model=model)
            # car = Car.objects.get(model='a')
        except Exception:
            car = Car(model=model, brand=brand)
            # car = Car(model='a', brand="")
            car.save()

        start_time = datetime.datetime.strptime(str(start_time), '%Y-%m-%d %H:%M:%S')
        appearance_time = start_time + timedelta(seconds=obj[2])

        cctv_log = CctvLog(car_model=car, color=obj[3], appearance_time=appearance_time, cctv_id=cctv)
        cctv_log.save()

    writer.release()
    cap.release()
    os.remove(video)
    cv.destroyAllWindows()
Code example #12
    def set_net(self):
        """
        build encoder-decoder network using the (pre-trained) VGG16 network.

        :return: None
        """
        # symbolic input tensor for RGB images of shape (*img_size, 3)
        inp = Input(shape=(*self.img_size, 3), name='image')
        # pre-trained encoder (vgg16)
        encoder = VGG16(include_top=False, input_tensor=inp)

        # first decoding block
        decoder = UpSampling2D()(encoder.get_layer('block5_conv3').output)
        if self.skip_connections:
            decoder = concatenate(
                [decoder, encoder.get_layer('block4_conv3').output], axis=-1)
        decoder = Conv2D(256, (3, 3), activation='relu',
                         padding='same')(decoder)
        decoder = Conv2D(256, (3, 3), activation='relu',
                         padding='same')(decoder)

        # second decoding block
        decoder = UpSampling2D()(decoder)
        if self.skip_connections:
            decoder = concatenate(
                [decoder, encoder.get_layer('block3_conv3').output], axis=-1)
        decoder = Conv2D(128, (3, 3), activation='relu',
                         padding='same')(decoder)
        decoder = Conv2D(128, (3, 3), activation='relu',
                         padding='same')(decoder)

        # third decoding block
        decoder = UpSampling2D()(decoder)
        if self.skip_connections:
            decoder = concatenate(
                [decoder, encoder.get_layer('block2_conv2').output], axis=-1)
        decoder = Conv2D(64, (3, 3), activation='relu',
                         padding='same')(decoder)
        decoder = Conv2D(64, (3, 3), activation='relu',
                         padding='same')(decoder)

        # fourth decoding block
        decoder = UpSampling2D()(decoder)
        if self.skip_connections:
            decoder = concatenate(
                [decoder, encoder.get_layer('block1_conv2').output], axis=-1)
        decoder = Conv2D(32, (3, 3), activation='relu',
                         padding='same')(decoder)
        decoder = Conv2D(32, (3, 3), activation='relu',
                         padding='same')(decoder)

        # tensor with the output mask
        res_mask = Conv2D(self.mask_channels, (1, 1),
                          activation='softmax',
                          name='mask')(decoder)

        if self.classification:
            # build result tensor based on the output of the encoder
            res_classification = self._add_classification_branch(encoder)
            # model for multiple outputs (including the classification)
            model = Model([inp], [res_mask, res_classification])

        else:
            # model for single output (only the mask)
            model = Model([inp], [res_mask])

        # store model and meta-information regarding the layer names, shape, and index of the encoder layers
        self.neural_net = model
        self.encoder_layers = [(layer.name, layer.output_shape, i)
                               for i, layer in enumerate(encoder.layers)]
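
set_net stores the network uncompiled; a hedged sketch of compiling the mask-only variant (the loss choice is an assumption consistent with the softmax mask output; the classification variant would need one loss per output):

        # e.g. appended at the end of set_net():
        self.neural_net.compile(optimizer='adam',
                                loss='categorical_crossentropy',
                                metrics=['accuracy'])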
Code example #13
Y = np_utils.to_categorical(labels, num_classes)

# Shuffle the dataset
x, y = shuffle(img_data, Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=2)

#########################################################################################
# Custom_vgg_model_1
# Training the classifier alone
image_input = Input(shape=(224, 224, 3))

model = VGG16(input_tensor=image_input, include_top=True, weights='imagenet')
model.summary()
last_layer = model.get_layer('fc2').output
#x= Flatten(name='flatten')(last_layer)
out = Dense(num_classes, activation='softmax', name='output')(last_layer)
custom_vgg_model = Model(image_input, out)
custom_vgg_model.summary()

for layer in custom_vgg_model.layers[:-1]:
    layer.trainable = False

print(custom_vgg_model.layers[3].trainable)  # spot-check: frozen layers report False

custom_vgg_model.compile(loss='categorical_crossentropy',
                         optimizer='rmsprop',
                         metrics=['accuracy'])
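
Training then proceeds on the split prepared above (a hedged sketch; the batch size and epoch count are assumptions, not taken from the original):

hist = custom_vgg_model.fit(X_train, y_train,
                            batch_size=32,
                            epochs=12,
                            verbose=1,
                            validation_data=(X_test, y_test))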