Example No. 1
# Assumed imports for this snippet (legacy Keras Graph API plus the
# convnets-keras helper used throughout these examples):
from keras.models import Graph
from keras.layers import Dense
from keras.regularizers import l2
from convnets import convnet


def HierarchicalCNN(dimensions,
                    convnet_name='alexnet', weights_path=None,
                    output_layer="dense_2", trainable=True):
    """
    Hierarchical CNN.

    Computes visual features via a convnet and carries these features into a
    hierarchical classifier with n_binary levels of binary classification,
    followed by a final classification step with n_out classes.

    The loss function is a combination of the n_binary+1 losses.
    """
    hierarchy = Graph()
    cnn = convnet(convnet_name, weights_path, output_layer,
                  convolutionize=False, trainable=trainable)

    input_shape = cnn.layers[0].input_shape[1:]
    hierarchy.add_input('input_image', input_shape=input_shape)
    hierarchy.add_node(cnn, 'feat_0', input='input_image')
    for n, d in enumerate(dimensions):
        if n + 1 < len(dimensions):
            # Intermediate level: a binary decision plus a d-dim embedding
            # that feeds the next level.
            dim_out = 2
            emb = Dense(d, activation='relu')
            hierarchy.add_node(emb, name='feat_' + str(1 + n), input='feat_' + str(n))
        else:
            # Final level: d-way classification.
            dim_out = d
        out = Dense(dim_out, activation='softmax', W_regularizer=l2())
        hierarchy.add_node(out, name='softmax' + str(n), input='feat_' + str(n))

    hierarchy.add_output(name='output',
                         inputs=[o for o in hierarchy.nodes if 'softmax' in o],
                         merge_mode="concat")
    return hierarchy
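
A minimal usage sketch (not part of the original example; the dimensions and
weights path are hypothetical). With dimensions [512, 256, 10] the graph emits
two binary softmaxes and one 10-way softmax, concatenated into a single
14-value output. Categorical cross-entropy over the concatenated vector
decomposes into the sum of the per-level cross-entropies, which is one way to
realise the "combination of losses" the docstring describes:

model = HierarchicalCNN([512, 256, 10],
                        convnet_name='alexnet',
                        weights_path='weights/alexnet_weights.h5')
# The legacy Graph API compiles against named outputs; the target vector
# must match the concatenated softmaxes (2 + 2 + 10 = 14 values).
model.compile(optimizer='sgd', loss={'output': 'categorical_crossentropy'})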
Example No. 2
def get_trained_model():
    """
    Return an AlexNet model trained on the ImageNet dataset
    (roughly one million images).
    """
    model = convnets.convnet('alexnet',
                             weights_path="/scratch/sk1846_data/trainedModels/alexnet_weights.h5",
                             heatmap=False)
    return model
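
A short usage sketch (an assumption, not from the original source): the image
path is hypothetical, and the preprocessing mirrors what the later examples in
this listing do (resize to the 227x227 AlexNet input, flip OpenCV's BGR to RGB):

import cv2
import numpy as np

model = get_trained_model()
img = cv2.imread('example.jpg')          # hypothetical image path
img = cv2.resize(img, (227, 227))        # AlexNet input size
img = img[:, :, ::-1]                    # BGR -> RGB, as in the other examples
probs = model.predict(np.array([img]))   # ImageNet class probabilities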
Example No. 3
def test_heap_one():
    #-----------------------------------------------------------------
    # 1: Set some necessary parameters
    weights_path = 'model/convnet_227_weights_epoch02_loss0.0030.h5'
    size = 227
    labels = {
        '0': 0,
        '1': 1,
        '2': 2,
        '3': 3,
        '4': 4,
        '5': 5,
        '6': 6,
        '7': 7,
        '8': 8,
        '9': 9,
        '10': 10,
        '15': 11,
        '16': 12
    }

    #-----------------------------------------------------------------
    # 2: Build the Keras model
    model = convnet('alexnet', weights_path=weights_path, heatmap=True)
    model.save_weights('test_model/conv.h5')

    #-----------------------------------------------------------------
    # 5: Create the validation set batch generator
    data_generator = icdar.generator(input_size=227,
                                     batch_size=1,
                                     labels=labels,
                                     vis=False)
    cnt = 0
    right = 0
    while True:
        cnt += 1
        X, y_true = next(data_generator)
        y_pred = model.predict(X)
        y_true_label = np.argmax(y_true)
        y_pred_label = np.argmax(y_pred)
        if y_pred_label == 0:
            print 'y_pred is background'
        elif y_pred_label == 1:
            print 'y_pred is negedge'
        else:
            print 'y_pred is posedge'
        if y_true_label == y_pred_label:
            print True
            right += 1
        else:
            print False
        print 'the accuracy is %f' % (float(right) / cnt)
Example No. 4
def main():
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model = convnet('alexnet', heatmap=False)
    model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

    # Inspect the layout of the Theano-ordered weight file.
    with h5py.File('model/alexnet_weights.h5', 'r') as f:
        names = f.keys()
        for i in names:
            print i
            for j in f[i].keys():
                print f[i][j].shape
        print 'print h5 end'
    # Inspect the shapes the TensorFlow-ordered model expects.
    layers = model.layers
    for i in layers:
        print i.name
        for j in i.get_weights():
            print j.shape
    print 'print model end'
    for i in range(5):
        print '\n'
    f = h5py.File('model/alexnet_weights.h5', 'r')
    for i in layers:
        name = i.name
        if 'conv_' in name:
            if len(i.get_weights()) > 0:
                print 'load %s weights'%name
                w = f[name][name+'_W']
                b = f[name][name+'_b']
                # Theano kernels are (out_ch, in_ch, h, w); TensorFlow
                # expects (h, w, in_ch, out_ch).
                w = np.transpose(w, [2, 3, 1, 0])
                i.set_weights([w, b])
            else:
                print 'layer %s has no weights'%name
        elif 'dense_' in name and name != 'dense_4':
            # dense_4 (the final classifier) is skipped, presumably because
            # its output shape differs in this model.
            print 'load %s weights'%name
            w = f[name][name+'_W']
            b = f[name][name+'_b']
            i.set_weights([w, b])
    print 'load end!!!'
    print 'save weights'
    model.save_weights('model/tf_alexnet_weights.h5')
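
A short hedged check (not in the original snippet) that the converted file
loads back cleanly into a fresh TensorFlow-ordered model:

model2 = convnet('alexnet', heatmap=False)
model2.load_weights('model/tf_alexnet_weights.h5')  # raises on any shape mismatch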
Example No. 5
def main():
    #-----------------------------------------------------------------
    # 1: Set some necessary parameters
    #weights_path = 'model/v4_0_1_convnet_227_weights_epoch06_loss0.0012.h5'
    weights_path = 'model/v4_0_3_convnet_227_weights_epoch04_loss0.0004.h5'

    #-----------------------------------------------------------------
    # 2: Build the Keras model
    model = convnet('alexnet', weights_path=weights_path, heatmap=True)
    
    posedge_path = '/home/zhaoke/justrypython/ks_idcard_ocr/testimg/card_bat/'
    negedge_path = '/home/zhaoke/justrypython/ks_idcard_ocr/testimg/neg_imgs/'
    background_path = '/home/zhaoke/gtest/ADEChallengeData2016/images/training2w/'
    
    for step_cnt in range(50):
        step = 355.0  # target scale used when resizing each image
        thres = 0.5 + 0.01 * step_cnt  # sweep decision thresholds 0.50 .. 0.99
        print '-----------%f------------'%thres
        starttime = datetime.datetime.now()
        print 'start time is ', starttime
        
        pos_cnt = 0
        pos_rgt = 0
        for i in os.listdir(posedge_path):
            img = cv2.imread(posedge_path+i)
            img = img[:, :, ::-1]  # BGR -> RGB
            factor = min(img.shape[0]/step, img.shape[1]/step)
            reshape = (int(img.shape[1]/factor), int(img.shape[0]/factor))
            img = cv2.resize(img, reshape)
            if img.shape[0] < 227 or img.shape[1] < 227:
                continue  # skip images smaller than the AlexNet input
            result = model.predict(np.array([img]))
            result = predict(result, thres=thres)
            pos_cnt += 1
            if result == 2:
                pos_rgt += 1
            else:
                pass
                #cv2.imwrite('results/pos/%d.jpg'%pos_cnt, img[:, :, ::-1])
        
        neg_cnt = 0
        neg_rgt = 0
        for i in os.listdir(negedge_path):
            img = cv2.imread(negedge_path+i)
            img = img[:, :, ::-1]
            factor = min(img.shape[0]/step, img.shape[1]/step)
            reshape = (int(img.shape[1]/factor), int(img.shape[0]/factor))
            img = cv2.resize(img, reshape)
            if img.shape[0] < 227 or img.shape[1] < 227:
                continue
            result = model.predict(np.array([img]))
            result = predict(result, thres=thres)
            neg_cnt += 1
            if result == 1:
                neg_rgt += 1
            else:
                pass
                #cv2.imwrite('results/neg/%d.jpg'%neg_cnt, img[:, :, ::-1])
        
        bck_cnt = 0
        bck_rgt = 0
        for i in os.listdir(background_path):
            img = cv2.imread(background_path+i)
            img = img[:, :, ::-1]
            factor = min(img.shape[0]/step, img.shape[1]/step)
            reshape = (int(img.shape[1]/factor), int(img.shape[0]/factor))
            img = cv2.resize(img, reshape)
            if img.shape[0] < 227 or img.shape[1] < 227:
                continue
            result = model.predict(np.array([img]))
            result = predict(result, thres=thres)
            bck_cnt += 1
            if result == 0:
                bck_rgt += 1
            else:
                pass
                #cv2.imwrite('results/bgd/%d.jpg'%bck_cnt, img[:, :, ::-1])
            if bck_cnt > 500:
                break
            
        print 'the posedge rate is %.3f'%(float(pos_rgt)/pos_cnt)
        print 'the negedge rate is %.3f'%(float(neg_rgt)/neg_cnt)
        print 'the background rate is %.3f'%(float(bck_rgt)/bck_cnt)
        print 'the total rate is %.3f'%(float(pos_rgt+neg_rgt+bck_rgt)/(pos_cnt+neg_cnt+bck_cnt))
    
        endtime = datetime.datetime.now()
        print 'the total time is ', endtime - starttime
    
    print 'end'
Example No. 6
        else:
            return None
    except:
        print("Exception occured: ")
        traceback.print_exc(file=sys.stdout)
        return "{}", 500


if __name__ == "__main__":
    if len(sys.argv) < 4:
        print "Usage: " + sys.argv[0] + " HOST PORT PATH_TO_PORNH5 CACHING"
        sys.exit(1)

    pornH5Path = sys.argv[3]
    if not pornH5Path.endswith('porn.h5'):
        pornH5Path = os.path.join(pornH5Path, 'porn.h5')

    caching = False
    if len(sys.argv) > 4:
        # bool('False') would be True, so parse the flag explicitly.
        caching = sys.argv[4].lower() in ('1', 'true', 'yes')

    model = convnet('alexnet', output_layer='dense_2')
    model.add(Dense(1, activation='sigmoid', name='classifier'))
    model.load_weights(pornH5Path)

    sgd = SGD(lr=.5, decay=1.e-6, momentum=0., nesterov=False)
    model.compile(optimizer=sgd, loss='binary_crossentropy')

    print "Ready"
    app.run(debug=False, host=sys.argv[1], port=sys.argv[2], threaded=False)
Example No. 7
def test_compare():
    #-----------------------------------------------------------------
    # 1: Set some necessary parameters
    # Compare the heatmap model against the plain model layer by layer,
    # starting from the same weights.
    heap_weights_path = 'model/convnet_227_weights_epoch02_loss0.0030.h5'
    nohp_weights_path = 'model/convnet_227_weights_epoch02_loss0.0030.h5'
    size = 227
    labels = {
        '0': 0,
        '1': 1,
        '2': 2,
        '3': 3,
        '4': 4,
        '5': 5,
        '6': 6,
        '7': 7,
        '8': 8,
        '9': 9,
        '10': 10,
        '15': 11,
        '16': 12
    }

    #-----------------------------------------------------------------
    # 5: Create the validation set batch generator
    data_generator = icdar.generator(input_size=227,
                                     batch_size=1,
                                     labels=labels,
                                     vis=False)
    with tf.Session() as sess:
        nohp_model = convnet('alexnet',
                             weights_path=nohp_weights_path,
                             heatmap=False)
        heap_model = convnet('alexnet',
                             weights_path=heap_weights_path,
                             heatmap=True)
        X, y_true = next(data_generator)
        nohp_y_pred = nohp_model.predict(X)
        heap_y_pred = heap_model.predict(X)
        graph = tf.get_default_graph()
        ops = [v for v in graph.get_operations()]
        plcers = [i for i in ops if i.type == 'Placeholder']  # all input placeholders, kept for inspection
        pool0 = graph.get_tensor_by_name('convpool_5/MaxPool:0')
        pool1 = graph.get_tensor_by_name('convpool_5_1/MaxPool:0')
        pool2 = graph.get_tensor_by_name('convpool_5_2/MaxPool:0')
        a, b, c = sess.run([pool0, pool1, pool2],
                           feed_dict={
                               'input_1:0': X,
                               'input_2:0': X,
                               'input_3:0': X
                           })
        dense0 = graph.get_tensor_by_name('dense_1/Relu:0')
        dense1 = graph.get_tensor_by_name('dense_1_1/Relu:0')
        dense2 = graph.get_tensor_by_name('dense_1_2/Relu:0')
        a, b, c = sess.run([dense0, dense1, dense2],
                           feed_dict={
                               'input_1:0': X,
                               'input_2:0': X,
                               'input_3:0': X
                           })
        dense0 = graph.get_tensor_by_name('dense_2/Relu:0')
        dense1 = graph.get_tensor_by_name('dense_2_1/Relu:0')
        dense2 = graph.get_tensor_by_name('dense_2_2/Relu:0')
        a, b, c = sess.run(
            [dense0, dense1, dense2],
            feed_dict={
                'input_1:0': X,
                'input_2:0': X,
                'input_3:0': X,
                'dropout_1/keras_learning_phase:0': False
            })
        dense0 = graph.get_tensor_by_name('dense_3/BiasAdd:0')
        dense1 = graph.get_tensor_by_name('dense_3_1/BiasAdd:0')
        dense2 = graph.get_tensor_by_name('dense_3_2/BiasAdd:0')
        a, b, c = sess.run(
            [dense0, dense1, dense2],
            feed_dict={
                'input_1:0': X,
                'input_2:0': X,
                'input_3:0': X,
                'dropout_1/keras_learning_phase:0': False
            })
        dense0 = graph.get_tensor_by_name('dense_4/BiasAdd:0')
        dense1 = graph.get_tensor_by_name('dense_4_1/BiasAdd:0')
        dense2 = graph.get_tensor_by_name('dense_4_2/BiasAdd:0')
        a, b, c = sess.run(
            [dense0, dense1, dense2],
            feed_dict={
                'input_1:0': X,
                'input_2:0': X,
                'input_3:0': X,
                'dropout_1/keras_learning_phase:0': False
            })
        dense0 = graph.get_tensor_by_name('softmax/Softmax:0')
        dense1 = graph.get_tensor_by_name('softmax_1/Softmax:0')
        dense2 = graph.get_tensor_by_name('softmax_2/Reshape_1:0')
        a, b, c = sess.run(
            [dense0, dense1, dense2],
            feed_dict={
                'input_1:0': X,
                'input_2:0': X,
                'input_3:0': X,
                'dropout_1/keras_learning_phase:0': False
            })
        print 'end'
Example No. 8
def alexnet():
    # Fetch the AlexNet Keras helpers from GCS on first use.
    if not os.path.exists("alexnet_keras.zip"):
        print("Downloading Alexnet Keras Helpers")
        with file_io.FileIO(
                "gs://fynd-open-source/research/MILDNet/alexnet_keras.zip",
                mode='r') as alexnet_keras:
            with file_io.FileIO("alexnet_keras.zip", mode='w+') as output_f:
                output_f.write(alexnet_keras.read())
        dest_path = "/root/.local/lib/python2.7/site-packages/trainer"
        with zipfile.ZipFile("alexnet_keras.zip", 'r') as zip_ref:
            zip_ref.extractall(dest_path)
            import shutil
            for f in os.listdir("{}/alexnet_keras/".format(dest_path)):
                shutil.copy("{}/alexnet_keras/{}".format(dest_path, f),
                            "{}/{}".format(dest_path, f))
                shutil.copy("{}/alexnet_keras/{}".format(dest_path, f),
                            "{}/{}".format("/user_dir", f))

    os.popen("pip install keras==2.0.4").read()  # the helpers expect Keras 2.0.4
    from convnets import convnet

    alexnet_model = convnet('alexnet',
                            weights_path="alexnet_weights.h5",
                            heatmap=False)
    # Deep branch: GAP over convpool_5 features, two FC+dropout layers,
    # then l2-normalise the embedding.
    convnet_output = GlobalAveragePooling2D()(
        alexnet_model.get_layer('convpool_5').output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        convnet_output)

    # First shallow branch: coarse subsampling of the raw image.
    first_input = Input(shape=(3, 227, 227))
    first_maxpool = MaxPooling2D(pool_size=4, strides=4)(first_input)
    first_conv = Conv2D(96, kernel_size=8, strides=4,
                        activation='relu')(first_maxpool)
    first_zero_padding = ZeroPadding2D(padding=(3, 3))(first_conv)
    first_maxpool2 = MaxPooling2D(pool_size=7, strides=4,
                                  padding='same')(first_zero_padding)
    first_maxpool2 = Flatten()(first_maxpool2)
    first_maxpool2 = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        first_maxpool2)

    # Second shallow branch: even coarser subsampling.
    second_input = Input(shape=(3, 227, 227))
    second_maxpool = MaxPooling2D(pool_size=8, strides=8)(second_input)
    second_conv = Conv2D(96, kernel_size=8, strides=4,
                         activation='relu')(second_maxpool)
    second_zero_padding = ZeroPadding2D(padding=(1, 1))(second_conv)
    second_maxpool2 = MaxPooling2D(pool_size=3, strides=2,
                                   padding='same')(second_zero_padding)
    second_maxpool2 = Flatten()(second_maxpool2)
    second_maxpool2 = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        second_maxpool2)

    # Fuse both shallow branches with the deep branch into one
    # l2-normalised 4096-d embedding.
    merge_one = concatenate([first_maxpool2, second_maxpool2])
    merge_two = concatenate([merge_one, convnet_output])
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)

    final_model = Model(
        inputs=[first_input, second_input, alexnet_model.input],
        outputs=l2_norm_final)

    return final_model
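
A minimal hedged sketch of calling the returned model (the random input is
purely illustrative): all three inputs take the same channels-first 227x227
image, and the output is one l2-normalised 4096-d embedding per sample:

import numpy as np

model = alexnet()
x = np.random.rand(1, 3, 227, 227).astype('float32')  # dummy image batch
emb = model.predict([x, x, x])  # shape (1, 4096)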
Example No. 9
def main():
    #-----------------------------------------------------------------
    # 1: Set some necessary parameters
    weights_path = 'model/v4_0_1_convnet_227_weights_epoch06_loss0.0012.h5'

    #-----------------------------------------------------------------
    # 2: Build the Keras model
    model = convnet('alexnet', weights_path=weights_path, heatmap=True)

    path = '/media/zhaoke/806602c3-72ac-4719-b178-abc72b3fa783/share/10000id_part/'
    dst_path = '/media/zhaoke/806602c3-72ac-4719-b178-abc72b3fa783/share/10000id_part_classified_2/'

    bad_case = [
        '/media/zhaoke/806602c3-72ac-4719-b178-abc72b3fa783/share/10000id_part/1YHK/3/3/943_songjing/1.jpg'
    ]

    starttime = datetime.datetime.now()
    print 'starttime: ', starttime

    imgs = []
    for root, dirnames, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith('.jpg'):
                imgs.append(os.path.join(root, filename))

    cnt = 0
    for i in imgs:
        if i in bad_case:
            continue
        img = cv2.imread(i)
        if img is None:
            continue
        elif img.shape[0] < 227 or img.shape[1] < 227:
            continue
        #factor = min(img.shape[0]/720.0, img.shape[1]/1280.0)
        #reshape = (int(img.shape[1]/factor), int(img.shape[0]/factor))
        # Resize so the shorter side becomes 227; the max() guards against
        # rounding just under 227.
        factor = min(img.shape[0] / 227.0, img.shape[1] / 227.0)
        reshape = (int(img.shape[1] / factor), int(img.shape[0] / factor))
        reshape = (max(227, reshape[0]), max(227, reshape[1]))
        raw_img = img.copy()
        img = cv2.resize(img, reshape)
        cnt += 1
        img = img[:, :, ::-1]  # BGR -> RGB
        result = model.predict(np.array([img]))
        result = predict(result)
        # i[-i[::-1].find('/'):] below is the file's basename (text after the last '/').
        if result == 0:
            dst_img_path = os.path.join(dst_path, 'bgd',
                                        '%d_' % cnt + i[-i[::-1].find('/'):])
            cv2.imwrite(dst_img_path, img[:, :, ::-1])
        elif result == 1:
            dst_img_path = os.path.join(dst_path, 'neg',
                                        '%d_' % cnt + i[-i[::-1].find('/'):])
            cv2.imwrite(dst_img_path, img[:, :, ::-1])
        else:
            dst_img_path = os.path.join(dst_path, 'pos',
                                        '%d_' % cnt + i[-i[::-1].find('/'):])
            cv2.imwrite(dst_img_path, img[:, :, ::-1])
        if cnt % 20 == 0:
            print cnt

    endtime = datetime.datetime.now()
    print 'the total time is ', endtime - starttime
Example No. 10
def main():
    #-----------------------------------------------------------------
    # 1: Set some necessary parameters
    #data_path = 'model/tf_alexnet_weights.h5'
    data_path = None
    size = 227
    labels = {'0': 0,
              '1': 1,
              '2': 2,
              '3': 3,
              '4': 4,
              '5': 5,
              '6': 6,
              '7': 7,
              '8': 8,
              '9': 9,
              '10': 10,
              '15': 11,
              '16': 12}

    #-----------------------------------------------------------------
    # 2: Build the Keras model
    sgd = SGD(lr=0.01, decay=5e-4, momentum=0.9, nesterov=True)
    
    model = convnet('alexnet', weights_path=data_path, heatmap=False)
    model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

    #-----------------------------------------------------------------
    # 4: Instantiate an encoder that can encode ground truth labels into 
    #    the format needed by the EAST loss function
    
    #-----------------------------------------------------------------
    # 5: Create the validation set batch generator
    #data_generator = icdar.get_batch(num_workers=1,
    #                                 input_size=size,
    #                                 batch_size=1,
    #                                 labels=labels)
    #valid_generator = icdar.get_batch(num_workers=1,
    #                                  input_size=size,
    #                                  batch_size=1,
    #                                  labels=labels)
    data_generator = dataprocess.data_generator('', window_size=227*227, random_size=1e6, batch_size=64, mode='prime')
    valid_generator = dataprocess.data_generator('', window_size=227*227, batch_size=64, mode='prime')
    #data_generator.next()
    
    #-----------------------------------------------------------------
    # 6: Run training
    model.fit_generator(generator = data_generator,
                        steps_per_epoch = 5000,
                        epochs = 100,
                        callbacks = [ModelCheckpoint('./model/pcnn_227_weights_epoch{epoch:02d}_loss{loss:.4f}.h5',
                                                     monitor='val_loss',
                                                     verbose=1,
                                                     save_best_only=True,
                                                     save_weights_only=True,
                                                     mode='auto',
                                                     period=1),
                                     ReduceLROnPlateau(monitor='val_loss',
                                                       factor=0.5,
                                                       patience=0,
                                                       epsilon=0.001,
                                                       cooldown=0)],
                        validation_data = valid_generator,
                        validation_steps = 500)
Example No. 11
        return json.dumps({ "results": probs })
      else:
        return None
    except:
      print("Exception occured: ")
      traceback.print_exc(file=sys.stdout)
      return "{}", 500

if __name__ == "__main__":
    if len(sys.argv) < 4:
        print "Usage: "+sys.argv[0]+" HOST PORT PATH_TO_PORNH5 CACHING"
        sys.exit(1)

    pornH5Path = sys.argv[3]
    if not pornH5Path.endswith('porn.h5'):
        pornH5Path = os.path.join(pornH5Path, 'porn.h5')

    caching = False
    if len(sys.argv) > 4:
        # bool('False') would be True, so parse the flag explicitly.
        caching = sys.argv[4].lower() in ('1', 'true', 'yes')

    model = convnet('alexnet', output_layer='dense_2')
    model.add(Dense(1, activation='sigmoid', name='classifier'))
    model.load_weights(pornH5Path)
    
    sgd = SGD(lr=.5, decay=1.e-6, momentum=0., nesterov=False)
    model.compile(optimizer=sgd, loss='binary_crossentropy')

    print "Ready"
    app.run(debug=False, host=sys.argv[1], port=sys.argv[2], threaded=False)
Example No. 12
        '5': 5,
        '6': 6,
        '7': 7,
        '8': 8,
        '9': 9,
        '10': 10,
        '15': 11,
        '16': 12
    }
    # Quick sanity check of the one-hot helpers.
    a = np.random.randint(1, 6, (512, 512, 1))
    nb_classes = 6
    b = indices_to_one_hot(a, nb_classes)
    c = one_hot_to_indices(b)
    # Reuse `a` as the batch generator for the visual check below.
    a = generator(input_size=227, batch_size=1, labels=labels, vis=True)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model = convnet('alexnet', heatmap=False)
    model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])
    cnt = 0
    while True:
        img, y_true = next(a)
        if y_true.argmax() == 0:
            cv2.imwrite('results/bgd/%d.jpg' % cnt, img[0])
        elif y_true.argmax() == 1:
            cv2.imwrite('results/neg/%d.jpg' % cnt, img[0])
        else:
            cv2.imwrite('results/pos/%d.jpg' % cnt, img[0])
        cnt += 1
        y_pred = model.predict(np.array(img))
        print ((y_pred - y_true) ** 2).mean()  # per-batch mean squared error
        print 'end'
Example No. 13
    train_files = pkl.load(open("porn_classif/train_files.pkl", "rb"))
    test_files = pkl.load(open("porn_classif/test_files.pkl", "rb"))
    train_set = "porn_classif/train_set.h5"
    test_set = "porn_classif/test_set.h5"

###########################################
######### WE CHOOSE THE MODEL #############

random.shuffle(data)

input = Input(shape=(3,227,227))
alexnet = convnet('alexnet', weights_path='weights/alexnet_weights.h5',
                  output_layer='dense_2',
                  trainable=["None"])

filtered_img = alexnet(input)
classifier = Dense(1,
                   activation='sigmoid',
                   name='classifier',
                   W_regularizer=l2(0.001))(filtered_img)

model = Model(input=input, output=classifier)

sgd = SGD(lr=.1, decay=1.e-6, momentum=0.9, nesterov=False)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=["accuracy"])


batch_size = 64
Example No. 14
        data, "porn_classif/")

else:
    train_files = pkl.load(open("porn_classif/train_files.pkl", "rb"))
    test_files = pkl.load(open("porn_classif/test_files.pkl", "rb"))
    train_set = "porn_classif/train_set.h5"
    test_set = "porn_classif/test_set.h5"

###########################################
######### WE CHOOSE THE MODEL #############

random.shuffle(data)

input = Input(shape=(3, 227, 227))
alexnet = convnet('alexnet',
                  weights_path='weights/alexnet_weights.h5',
                  output_layer='dense_2',
                  trainable=["None"])

filtered_img = alexnet(input)
classifier = Dense(1,
                   activation='sigmoid',
                   name='classifier',
                   W_regularizer=l2(0.001))(filtered_img)

model = Model(input=input, output=classifier)

sgd = SGD(lr=.1, decay=1.e-6, momentum=0.9, nesterov=False)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=["accuracy"])

batch_size = 64
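
The training step itself is cut off in this excerpt. A hedged, hypothetical
continuation (the HDF5 dataset names 'data' and 'labels' are assumptions,
not taken from the source):

import h5py
import numpy as np

with h5py.File(train_set, 'r') as f:   # hypothetical dataset layout
    X_train = np.asarray(f['data'])
    y_train = np.asarray(f['labels'])
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=10, shuffle=True)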