Example #1
def img_proposal(img_path):
    """ 将给定的图片转换成 (框图片, 所在位置)的列表 """
    # img = skimage.io.imread(img_path)
    img = RCNN.load_img(img_path)
    img_lbl, regions = selectivesearch.selective_search(img,
                                                        scale=500,
                                                        sigma=0.9,
                                                        min_size=10)

    candidates = set()
    images = list()
    vertices = list()

    for r in regions:
        if r['rect'][2] == 0 or r['rect'][3] == 0:
            continue
        if r['size'] < 220:
            continue
        if r['rect'] in candidates:
            continue
        candidates.add(r['rect'])

        proposal_img, proposal_vertice = clip_img(img, r['rect'])
        resized_proposal_img = RCNN.resize_img(proposal_img, (224, 224))
        images.append(resized_proposal_img)
        vertices.append(proposal_vertice)

    return images, vertices
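
clip_img is referenced here but not defined in these examples. A minimal sketch, assuming the selective-search rect is (x, y, w, h) and that callers expect the crop plus its [x_min, y_min, x_max, y_max] corners:

def clip_img(img, rect):
    """Crop `img` to a selective-search rect (x, y, w, h).

    Returns the crop and its corner coordinates [x_min, y_min, x_max, y_max],
    which later IoU checks (see Example #21) compare against ground truth.
    """
    x, y, w, h = rect
    x_min, y_min = int(x), int(y)
    x_max, y_max = int(x + w), int(y + h)
    return img[y_min:y_max, x_min:x_max], [x_min, y_min, x_max, y_max]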
Example #2
    def __init__(self, master, test_img):
        self.master = master
        self.frame = tk.Frame(self.master)
        self.frame.pack(fill=BOTH, expand=1)
        self.master.wm_title("Detection Result")

        detection = RCNN.get_result_from_model(test_img, 0.16)
        load = Image.open(detection)
        width, height = load.size
        new_width = width + 50
        new_height = height + 50
        #self.master.geometry("600x600")
        if width > 1500 or height > 650:
            new_width = 1000
            new_height = int(new_width * height / width)
            load = load.resize((new_width, new_height), Image.ANTIALIAS)

        self.master.minsize(new_width, new_height)
        self.master.maxsize(new_width, new_height)
        render = ImageTk.PhotoImage(load)
        img = Label(self.frame, image=render)
        img.image = render
        img.place(x=25, y=25)

        self.quitButton = tk.Button(self.frame,
                                    text='Close this window',
                                    width=200,
                                    command=self.close_window)
        self.quitButton.pack(side=BOTTOM)
        self.frame.pack()
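
The snippet only shows the __init__ of a window class whose name is not given. A hypothetical driver, with ResultWindow standing in for that class and an illustrative image path borrowed from Example #16:

import tkinter as tk

root = tk.Tk()
# ResultWindow is an assumed name; only its __init__ appears above.
app = ResultWindow(root, 'source/image_0080.jpg')
root.mainloop()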
Example #3
def one_train_test(data, resize):
    x, y = data_transform_for_RCNN(data, resize)
    kv = BCI.gen_kv_idx(y, 10)
    acc = []
    loss = []
    for train_idx, test_idx in kv:
        x_train, y_train = x[train_idx], y[train_idx]
        x_test, y_test = x[test_idx], y[test_idx]
        model = RCNN.create_model(resize)
        model.fit(x_train,
                  y_train,
                  validation_data=(x_test, y_test),
                  epochs=epoch)
        metrics = model.evaluate(x_test, y_test)
        for i in range(len(model.metrics_names)):
            # str(... == 'acc') was always truthy; compare the names directly
            if model.metrics_names[i] == 'acc':
                acc.append(metrics[i])
            if model.metrics_names[i] == 'loss':
                loss.append(metrics[i])

    pen = open('/result.csv', 'a')

    config_sen = file + ',RCNN,out,' + str(resize) + ',' + str(epoch) + ','

    acc_sen = ''
    for v in acc:
        acc_sen += (str(v) + ',')
    acc_sen += (str(sum(acc) / float(len(acc))) + ',')
    loss_sen = ''
    for v in loss:
        loss_sen += (str(v) + ',')
    loss_sen += (str(sum(loss) / float(len(loss))) + ',')
    pen.write(config_sen + acc_sen + loss_sen + '\n')
    pen.close()
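
BCI.gen_kv_idx is not shown in these examples; from its use it yields (train_idx, test_idx) pairs over k folds. A minimal sketch of an equivalent, assuming one-hot labels, built on scikit-learn:

from sklearn.model_selection import StratifiedKFold

def gen_kv_idx(y, k):
    """Yield (train_idx, test_idx) pairs for k stratified folds.

    `y` is assumed one-hot; argmax recovers the class ids so every
    fold keeps the class distribution balanced.
    """
    skf = StratifiedKFold(n_splits=k, shuffle=True, random_state=0)
    for train_idx, test_idx in skf.split(y, y.argmax(axis=1)):
        yield train_idx, test_idx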
Example #4
def rcnn_ts_batch(sub = '1'):
  import matplotlib.pyplot as plt
  for fold in range(1, 6):
    x_train = x_translator(scipy.io.loadmat('F:/KIST/source/BCI/BCI/BCI/kist_test_data/twist/rev/A0' + sub + 'T_' + str(fold) + '_train.mat')['csp_2_train'])
    x_test = x_translator(scipy.io.loadmat('F:/KIST/source/BCI/BCI/BCI/kist_test_data/twist/rev/A0' + sub + 'T_' + str(fold) + '_test.mat')['csp_2_test'])
    file = open('kist_test_data/twist/np/' + sub + '_' + str(fold) + '.pic', 'rb')
    raw = pickle.load(file)
    file.close()
    y_train = raw['y_train']
    y_test = raw['y_test']
    x_train_ = np.reshape(x_train, (len(x_train), 18 * 4))
    x_test_ = np.reshape(x_test, (len(x_test), 18 * 4))
    pca = PCA(n_components=2)
    X_r = pca.fit(x_train_).transform(x_train_)
    X2_r = pca.transform(x_test_)
    y = y_train.argmax(axis = 1)
    y2 = y_test.argmax(axis = 1)
    plt.figure()
    colors = ['navy', 'turquoise', 'darkorange']
    colors2 = ['black', 'green', 'yellow']
    target_names = ['1', '2', '3']
    for color, i, target_name in zip(colors, [0, 1, 2], target_names):
      plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=2,
                label=target_name)
    for color, i, target_name in zip(colors2, [0, 1, 2], target_names):
      plt.scatter(X2_r[y2 == i, 0], X2_r[y2 == i, 1], color=color, alpha=.8, lw=2,
                label=target_name)

    plt.legend(loc='best')  # without this the scatter label= arguments never render
    plt.show()

    model = RCNN.create_model((18, 4, 1))
    model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size = 5, epochs=100)
    print('d')
Example #5
def RCNN_batch(sub='1', epoch=100):
    path = 'data/A0' + sub + 'T.npz'
    data = scipy.io.loadmat('F:/KIST/source/BCI/BCI/BCI/mat/tscsp_rev/A0' +
                            sub + 'T.mat')
    x_, y = Competition.load_one_data(path)
    x = data['csp_2']
    import feature_selection as FS
    y = np.array(BCI.lab_inv_translator(y, 4))
    kv = BCI.gen_kv_idx(y, 10)
    import RCNN, data_trans
    fold = 1
    for train_idx, test_idx in kv:
        x_train, y_train = x[:, train_idx, :], y[train_idx]
        x_test, y_test = x[:, test_idx, :], y[test_idx]
        x_train = data_trans.x_translator(x_train)
        x_test = data_trans.x_translator(x_test)
        model = RCNN.create_model2((48, 45, 1))
        model.fit(x_train,
                  y_train,
                  validation_data=(x_test, y_test),
                  epochs=epoch,
                  batch_size=10)
        metrics = model.evaluate(x_test, y_test)
        pen = open('rcnn_competition_2.csv', 'a')
        pen.write('RCNN,' + sub + ',' + str(fold) + ',' + str(epoch) + ',' +
                  str(metrics[1]) + '\n')
        pen.close()
        fold += 1
Example #6
def test():
    log_dir = './logdir'
    snapshot_interval = 10000
    snapshot_dir = './snapshot_dir'
    max_iter = 100000
    log_interval = 100

    lr = 0.0005

    file = 'data/test_32x32.mat'
    X_raw, y_raw = getData(filename=file)
    n_test = X_raw.shape[0]
    y_raw[y_raw == 10] = 0
    y_raw = np.reshape(y_raw, (n_test, ))

    snapshot_file = './snapshot_dir/%s' % (snapshot_name)  # snapshot_name: presumably a module-level global naming the checkpoint

    with tf.Session() as sess:
        X = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.int32, shape=(None, ))
        rcnn = RCNN(time=3, K=192, p=0.9, numclass=10, is_training=True)
        _, _, _, preds = rcnn.buile_model(X, y)

        snapshot_saver = tf.train.Saver(max_to_keep=None)  # keep all snapshots
        snapshot_saver.restore(sess, snapshot_file)

        np.random.seed(0)
        count = 0
        start = datetime.datetime.now()
        for i in range(n_test):
            image = X_raw[i:i + 1]  # slicing keeps the batch dimension the placeholder expects
            labels = y_raw[i]
            # print(image.shape)
            preds_each = sess.run(preds, feed_dict={X: image})
            if preds_each == labels:
                count += 1
            else:
                continue
    acc = count / (n_test * 1.0)
    end = datetime.datetime.now()
    print("sum time: {}, acc: {}".format(end - start, acc))
Example #7
def routine_from_npy(mode='pca', n_feature=64):
    file = open('res/' + mode + '/' + str(n_feature) + '.pic', 'rb')
    dat = pickle.load(file)
    file.close()
    for k, v in dat.items():
        model = RCNN.create_model(csp_val=csp_val)
        x = v['x']
        y = v['y']
        nf = k
        kf = KFold(n_splits=5, shuffle=False)
        #kv = kf.split(y)
        kv = gen_kvt_idx(y, 10)
        acc = []
        for train_idx, val_idx, test_idx in kv:
            x_train, x_test = x[train_idx], x[test_idx]
            x_val, y_val = x[val_idx], y[val_idx]
            y_train, y_test = y[train_idx], y[test_idx]
            #DNN = RCNN.create_model(csp_val=csp_val)
            DNN = create_DNN_model()
            x_train = x_train.reshape((x_train.shape[0], 16))
            x_val = x_val.reshape((x_val.shape[0], 16))
            x_test = x_test.reshape((x_test.shape[0], 16))

            yltr = label_test(y_train)
            ylte = label_test(y_test)

            epoch = 10000
            es = EarlyStopping(monitor='val_loss', patience=100, mode='auto')
            DNN.fit(x_train,
                    y_train,
                    validation_data=(x_val, y_val),
                    epochs=epoch,
                    callbacks=[es])
            metrics = DNN.evaluate(x_test, y_test)
            for i in range(len(DNN.metrics_names)):
                if (str(DNN.metrics_names[i]) == 'acc'):
                    acc.append(metrics[i])
                if (str(DNN.metrics_names[i]) == 'categorical_accuracy'):
                    acc.append(metrics[i])
                print(str(DNN.metrics_names[i]) + ": " + str(metrics[i]))
        pen = open('../result_pca.csv', 'a')
        pen.write(nf + ',' + mode + ',RCNN_DR:0.6,' + str(n_feature) + ',' +
                  str(epoch) + ',' + str(sum(acc) / float(len(acc))) + '\n')
        pen.close()

        pen2 = open('../result_pca_detailv' + '.' + str(csp_val) + '.csv', 'a')
        for accs in acc:
            pen2.write(nf + ',' + mode + ',RCNN_LabelValance,' + str(epoch) +
                       ',' + str(accs) + '\n')
        pen2.close()
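
gen_kvt_idx is likewise not shown; from its use it yields (train_idx, val_idx, test_idx) triples. A minimal sketch under the same one-hot assumption, rotating one stratified fold for test and the next for validation:

import numpy as np
from sklearn.model_selection import StratifiedKFold

def gen_kvt_idx(y, k):
    """Yield (train_idx, val_idx, test_idx) triples over k stratified folds."""
    skf = StratifiedKFold(n_splits=k, shuffle=True, random_state=0)
    folds = [test for _, test in skf.split(y, y.argmax(axis=1))]
    for i in range(k):
        val = (i + 1) % k  # fold i tests, fold i+1 validates, the rest train
        train_idx = np.hstack([folds[j] for j in range(k) if j not in (i, val)])
        yield train_idx, folds[val], folds[i]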
Example #8
def one_routine(path, file, mode):
    os.chdir(path)
    nf = get_file_name(file)
    x, y = loader(nf, mode)
    kf = KFold(n_splits=20, shuffle=True)
    kv = kf.split(x)
    acc = []
    for train_idx, test_idx in kv:
        x_train, x_test = x[train_idx], x[test_idx]
        x_train = x_train.transpose()[:1800].transpose()
        x_test = x_test.transpose()[:1800].transpose()
        y_train, y_test = y[train_idx], y[test_idx]
        DNN = RCNN.create_model(csp_val=csp_val)
        x_train = x_train.reshape((x_train.shape[0], 100, 6, 3))
        x_test = x_test.reshape((x_test.shape[0], 100, 6, 3))

        epoch = 10000
        es = EarlyStopping(monitor='val_acc', patience=500, mode='auto')
        DNN.fit(x_train,
                y_train,
                validation_data=(x_test, y_test),
                epochs=epoch,
                callbacks=[es],
                batch_size=1)
        metrics = DNN.evaluate(x_test, y_test)

        for i in range(len(DNN.metrics_names)):
            if (str(DNN.metrics_names[i]) == 'acc'):
                acc.append(metrics[i])
            if (str(DNN.metrics_names[i]) == 'categorical_accuracy'):
                acc.append(metrics[i])
            print(str(DNN.metrics_names[i]) + ": " + str(metrics[i]))
    pen = open('../result_S2_' + mode + '.' + str(csp_val) + '.csv', 'a')
    pen.write(nf + ',' + mode + ',RCNN_raw_1,' + str(epoch) + ',' +
              str(sum(acc) / float(len(acc))) + '\n')
    pen.close()

    pen2 = open('../result_S2_detailv_' + mode + '.' + str(csp_val) + '.csv',
                'a')
    for accs in acc:
        pen2.write(nf + ',' + mode + ',RCNN_raw_1,' + str(epoch) + ',' +
                   str(accs) + '\n')
    pen2.close()
Example #9
def train_svm(train_file, params_path):
    train_list = os.listdir(train_file)
    svms = list()  # store the trained SVM classifiers (one per class)

    # Train an SVM on each file list (one class per file)
    for train_txtfile in train_list:
        if 'pkl' in train_txtfile:
            continue
        x, y = generate_single_svm(train_file + train_txtfile)

        # Drop the final fully connected layer and feed the penultimate layer's 4096 outputs to the SVM as features
        svm_features = list()
        for i in x:
            feature = RCNN.predic_to_svm(params_path, i)
            svm_features.append(feature)

        clf = svm.LinearSVC()
        clf.fit(svm_features, y)
        svms.append(clf)

    return svms
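
A hedged inference sketch with the returned classifiers, mirroring the loop in Example #16; it assumes each per-class LinearSVC was trained with 0 as the background label and that `features` holds the 4096-d vectors from RCNN.predic_to_svm:

svms = train_svm('source/svm_train/', 'model/fine_tune_params.npz')
for f in features:
    for k, clf in enumerate(svms):
        if clf.predict([f])[0] != 0:  # non-zero means this class SVM accepts the proposal
            print('proposal accepted by classifier', k)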
Example #10
def classification(file):
    data = scipy.io.loadmat(file)['csp'][0][0]
    train_x = data[0]
    train_y = data[1]
    test_x = data[2]
    test_y = data[3]
    for i in range(5):
        tx = np.transpose(train_x[i])
        tx = np.reshape(tx, (tx.shape[0], tx.shape[1], tx.shape[2], 1))
        ty = np.transpose(train_y[i])
        vx = np.transpose(test_x[i])
        vx = np.reshape(vx, (vx.shape[0], vx.shape[1], vx.shape[2], 1))
        vy = np.transpose(test_y[i])
        from keras.callbacks import EarlyStopping
        model = RCNN.create_model((tx.shape[1], tx.shape[2], 1))
        #model = CNN.create_model((tx.shape[1], tx.shape[2], 1))
        #model.fit(tx[:60,:,:], ty[:60,:], validation_data=(vx, vy), epochs=100, shuffle = True)
        model.fit(tx, ty, validation_data=(vx, vy), epochs=100, shuffle=True)
        metrics = model.evaluate(vx, vy)
        pen = open('rcnn_1107.csv', 'a')
        sub = file.split('_')[-1].split('.')[0]
        pen.write('RCNN,' + sub + ',' + str(metrics[1]) + '\n')
        pen.close()
Example #11
	time_elapsed = time.time() - since
	print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

	return model


''' ############################ Parameters ############################'''
lr = 0.001

batch_size = 32
# root_data_dir should contain 3 sub-folders: 'train', 'validation' and 'test'
root_data_dir = '/Users/Mor\'s Yoga/Documents/GitHub/DetectionProject/ProcessedData'

''' ############################    Main    ############################'''
# net = create_model()
net = RCNN(num_regressions=4, train_phase='regressions')
# net.load_state_dict(torch.load(PATH_TO_STATE_DICT_PR_FILE))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1000)

# Decay LR by a factor of `gamma` every `step_size` epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.0001)

# Create train and validation data loaders
trainDataLoader = BusDataLoader(root_dir=os.path.join(root_data_dir, 'train'),
                                data_loader_type='train',
                                BGpad=16,
                                outShape=224,
                                balance_data_size=1,
                                augment_pos=0)
Example #12
def create_tRCNN():
    model = RCNN.makeModel(18, 6, 3, 3)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])
    return model
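
Since the returned model is already compiled, a quick sanity check before fitting might be:

model = create_tRCNN()
model.summary()  # inspect the layer stack; fit(...) then follows as in the other examples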
Example #13
DROPOUT = 0.5
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
if ADD_SENTI:
    SENTIMENT_SIZE = len(STEXT.vocab)
    # print(len(STEXT.vocab))
    # print(len(TEXT.vocab))
    # print(STEXT.vocab.stoi)
    # print("-----------------\n")
    # print(TEXT.vocab.stoi)
    SPAD_IDX = STEXT.vocab.stoi[STEXT.pad_token]
    SENTI_DIM = 26
    if FILTER:
        model = CNN.CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_IDX,
                     senti_size=SENTIMENT_SIZE, senti_dim=SENTI_DIM, passes=2, add_senti=True, spad_idx=SPAD_IDX)
    else:
        model = RCNN.CNN(INPUT_DIM, EMBEDDING_DIM, 8, 300, OUTPUT_DIM, DROPOUT, PAD_IDX,
                     senti_size=SENTIMENT_SIZE, senti_dim=SENTI_DIM, passes=2, add_senti=True, spad_idx=SPAD_IDX)
    SUNK_IDX = STEXT.vocab.stoi[STEXT.unk_token]
    model.sentiembedding.weight.data.copy_(STEXT.vocab.vectors)
    model.sentiembedding.weight.data[SUNK_IDX] = torch.zeros(SENTI_DIM)
    model.sentiembedding.weight.data[SPAD_IDX] = torch.zeros(SENTI_DIM)
else:
    if FILTER:
        model = CNN.CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_IDX)
    else:
        model = RCNN.CNN(INPUT_DIM, EMBEDDING_DIM, 5, 300, OUTPUT_DIM, DROPOUT, PAD_IDX)
if WVINIT:
    pretrained_embeddings = TEXT.vocab.vectors
    model.embedding.weight.data.copy_(pretrained_embeddings)
    UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
    model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
Example #14
				print('Saved model checkpoint with val_acc: {}'.format(str(round(current_epoch_acc, 4))))

	time_elapsed = time.time() - since
	print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

	return model


''' ############################ Parameters ############################'''
lr = 0.001
batch_size = 32
# root_data_dir should contain 3 sub-folders: 'train', 'validation' and 'test'
root_data_dir = '/Users/royhirsch/Documents/GitHub/ProcessedData'

''' ############################    Main    ############################'''
net = RCNN()
# weight_tensor = torch.tensor([1, 100]).float()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

# Decay LR by a factor of `gamma` every `step_size` epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.0001)

# Create train and validation data loaders
trainDataLoader = BusDataLoader(root_dir=os.path.join(root_data_dir, 'train'),
                                data_loader_type='train',
                                BGpad=16,
                                outShape=224,
                                balance_data_size=1,
                                augment_pos=0)
Example #15
def main():
    log_dir = './logdir'
    snapshot_interval = 10000
    snapshot_dir = './snapshot_dir'
    max_iter = 100000
    log_interval = 100

    lr = 0.0005

    file = 'data/train_32x32.mat'
    X_raw, y_raw = getData(filename=file)
    n_train = X_raw.shape[0]
    y_raw[y_raw == 10] = 0
    y_raw = np.reshape(y_raw, (n_train, ))

    with tf.Session() as sess:
        X = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.int32, shape=(None, ))
        rcnn = RCNN(time=3, K=192, p=0.9, numclass=10, is_training=True)
        loss, summary_op, acc, _ = rcnn.buile_model(X, y)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr,
                                           beta1=0.9,
                                           beta2=0.98,
                                           epsilon=1e-8).minimize(loss)
        init = tf.global_variables_initializer()
        sess.run(init)

        os.makedirs(snapshot_dir, exist_ok=True)
        snapshot_saver = tf.train.Saver(max_to_keep=None)  # keep all snapshots

        writer = tf.summary.FileWriter(log_dir, sess.graph)
        np.random.seed(0)
        loss_mean = 0
        acc_mean = 0
        start = datetime.datetime.now()
        for n_iter in range(max_iter):
            index = np.random.choice(n_train, 64, replace=True)
            image = X_raw[index]
            labels = y_raw[index]
            # print(image.shape)
            loss_batch, summary_op_batch, acc_batch, _ = sess.run(
                [loss, summary_op, acc, optimizer],
                feed_dict={
                    X: image,
                    y: labels
                })
            loss_mean += loss_batch
            acc_mean += acc_batch
            if (n_iter + 1) % log_interval == 0 or (n_iter + 1) == max_iter:
                loss_mean = loss_mean / (log_interval * 1.0)
                acc_mean = acc_mean / (log_interval * 1.0)
                batch_time = datetime.datetime.now()
                print("time: {},iter = {}\n\tloss = {}, accuracy (cur) = {} ".
                      format(batch_time - start, n_iter + 1, loss_mean,
                             acc_mean))
                loss_mean = 0
                acc_mean = 0

            writer.add_summary(summary_op_batch, global_step=n_iter)

            if (n_iter + 1) % snapshot_interval == 0 or (n_iter +
                                                         1) == max_iter:
                snapshot_file = os.path.join(snapshot_dir,
                                             "%08d" % (n_iter + 1))
                snapshot_saver.save(sess,
                                    snapshot_file,
                                    write_meta_graph=False)
                print('snapshot saved to ' + snapshot_file)

    end = datetime.datetime.now()
    print("sum time: {}".format(end - start))
    writer.close()
Example #16
    trainfile_folder = 'source/svm_train/'
    img_path = 'source/image_0080.jpg'
    load_path = ['model/fine_tune_params.npz', 'model/net_params.npz']

    if os.path.isfile('subrefine_dataset.pkl'):
        print('Loading Data...')
        x, y = IOU.load_from_pkl('subrefine_dataset.pkl')
    else:
        print('Reading Data...')
        IOU.load_train_proposals('source/subrefine_list.txt',
                                 2,
                                 save=True,
                                 save_path='subrefine_dataset.pkl')
        x, y = IOU.load_from_pkl('subrefine_dataset.pkl')

    RCNN.fine_tune(load_path, (x, y), 3)

    print('Start train svm...')

    svms = train_svm(trainfile_folder, load_path[0])

    print('Train svm over...')

    imgs, vertices = IOU.img_proposal(img_path)  # get proposal crops and their coordinates from one image
    features = RCNN.predic_to_svm(load_path[0], imgs)  # features of those proposals

    results = list()
    results_labels = list()
    count = 0
    for f in features:  # classify each proposal
        for i in svms:  # each per-class SVM decides whether the proposal belongs to its class (one proposal may match several classes)
Example #17
def main(args):
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all', 'cw-lid'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma', 'cw-l2', 'all' or 'cw-lid' for attacking LID detector"
    #model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
    model_file = "../model/densenet_cifar10.h5df"
    print(model_file)
    assert os.path.isfile(model_file), \
        'model file not found... must first train model using train_model.py.'
    if args.dataset == 'svhn' and args.attack == 'cw-l2':
        assert args.batch_size == 16, \
        "svhn has 26032 test images, the batch_size for cw-l2 attack should be 16, " \
        "otherwise, there will be error at the last batch-- needs to be fixed."

    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))
    # Create TF session, set it as Keras backend
    init_op = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init_op)
    #sess.run(tf.local_variables_initializer())
    sess.run(tf.initialize_all_variables())
    tf.keras.backend.set_session(sess)

    if args.attack == 'cw-l2' or args.attack == 'cw-lid':
        warnings.warn("Important: remove the softmax layer for cw attacks!")
        # use softmax=False to load without softmax layer
        if args.model == 'dense':
            model = densenet.create_dense_net(10,
                                              False, (32, 32, 3),
                                              40,
                                              3,
                                              12,
                                              16,
                                              dropout_rate=0)
            optimizer = Adam(
                lr=1e-4)  # Using Adam instead of SGD to speed up training
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizer,
                          metrics=["accuracy"])
        if args.dataset == 'mnist':
            model = get_model(args.dataset, softmax=False)
            model.compile(loss=cross_entropy,
                          optimizer='adadelta',
                          metrics=['accuracy'])
        if args.dataset == 'svhn':
            model = RCNN.get_model(False)
        model.load_weights(model_file)
    else:
        if args.model == 'dense':
            model = densenet.create_dense_net(10,
                                              True, (32, 32, 3),
                                              40,
                                              3,
                                              12,
                                              16,
                                              dropout_rate=0)

            optimizer = Adam(
                lr=1e-4)  # Using Adam instead of SGD to speed up training
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizer,
                          metrics=["accuracy"])
            model.load_weights(model_file)

        elif args.dataset == 'svhn':
            model = RCNN.get_model(True)
            model.load_weights(model_file)
        else:
            model = load_model(model_file)

    _, _, X_test, Y_test = get_data(args.dataset)
    score = model.evaluate(X_test,
                           Y_test,
                           batch_size=args.batch_size,
                           verbose=0)
    print("Accuracy on the test set: %0.2f%%" % (100 * score[1]))

    if args.attack == 'cw-lid':  # white box attacking LID detector - an example
        X_test = X_test[:1000]
        Y_test = Y_test[:1000]

    if args.attack == 'all':
        # Cycle through all attacks
        for attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2']:
            craft_one_type(sess, model, X_test, Y_test, args.dataset, attack,
                           args.batch_size)
    else:
        # Craft one specific attack type
        craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack,
                       args.batch_size)
    print('Adversarial samples crafted and saved to %s ' % PATH_DATA)
    _, acc = model.evaluate(X_test,
                            Y_test,
                            batch_size=args.batch_size,
                            verbose=0)
    print("After crafting, Accuracy on the test set: %0.2f%%" % (100 * acc))
    sess.close()
Example #18
    # compute the area of both the prediction and ground-truth
    # rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou = interArea / float(boxAArea + boxBArea - interArea)

    # return the intersection over union value
    return iou
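
The fragment above omits the top of the function, where the intersection rectangle and interArea are computed. A complete sketch, assuming corner-format boxes [x1, y1, x2, y2] and an assumed function name:

def bb_intersection_over_union(boxA, boxB):
    # corners of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    # intersection area, clamped to zero when the boxes do not overlap
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)

    # areas of both boxes, exactly as in the fragment above
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    return interArea / float(boxAArea + boxBArea - interArea)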


''' ############################    Main    ############################'''
net = RCNN()
net.load_state_dict(torch.load(checkpoint_path, map_location='cpu'))

# Create test and validation data loaders
testDataLoader = BusDataLoader(root_dir=os.path.join(root_data_dir, 'test'),
                               data_loader_type='test',
                               BGpad=16,
                               outShape=224,
                               balance_data_size=2,
                               augment_pos=0)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device is ' + str(device))
net.to(device)
label_df = get_GT_labels(label_path)
Example #19
import RCNN
import func_demo  # module providing generate_mask (used below)
import os
import skimage.io as io
import matplotlib.pyplot as plt
#os.environ["KERAS_BACKEND"]= 'tensorflow'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

mymodel = RCNN.load_model('/home/kaicao/Research/Mask_framework/15_12_0140.h5')

img = plt.imread(
    '/home/kaicao/Dropbox/Research/Data/Latent/NISTSD27/image/001.bmp')
mask, mask_ori = func_demo.generate_mask(
    mymodel,
    img,
    '/home/kaicao/Dropbox/Research/Data/Latent/NISTSD27',
    gen_type='box',
    fuse_thres=1)

print(mask.shape)
plt.set_cmap('gray')
plt.imshow(mask)
plt.show()
Example #20
def getData(filename):
    load_data = sio.loadmat(filename)
    y = load_data['y']
    X = load_data['X'].transpose(3, 0, 1, 2)
    return X, y


file = 'data/svhn/svhn_test.mat'
X_raw, y_raw = getData(filename=file)
X_raw = X_raw.astype('float32')
n_test = X_raw.shape[0]
y_raw[y_raw == 10] = 0

from keras.layers import *
from keras.utils import np_utils

from keras.models import load_model
#model = load_model('model-RCNN_new.hdf5')
model = RCNN.get_model(True)
model_file = "../model/model-svhn.h5"
model.load_weights(model_file)

y_raw = np_utils.to_categorical(y_raw, 10)
score = model.evaluate(X_raw, y_raw, verbose=0)
print(score[1])

from util import get_data
_, _, X_test, Y_test = get_data('svhn')
score = model.evaluate(X_test, Y_test, verbose=0)
print(score[1])
Example #21
def load_train_proposals(datapath,
                         num_class,
                         threshold=0.5,
                         svm=False,
                         save=False,
                         save_path='dataset.npz'):
    """ 从给定的datapath中返回 images 和 label。其中 images 是 select=search 得到的候选框, label 是经过阈值比较后
    得到的标签,0表示背景。

    Args:
        datapath: txt文件,每一行包含 图片位置 类别 标签框(用来进行阈值比较)
        num_class: 类别数目
        threshold: IOU阈值
        svm: 是否喂给SVM
        save: 是否保存
        save_path: 保存路径

    Returns:
        训练数据list(images, labels)
    """
    train_list = open(datapath, 'r')
    images = list()  # training samples
    labels = list()  # labels for those samples
    value_error = 0
    i = 0
    t1 = time.time()

    for line in train_list:
        t2 = time.time()
        tmp = line.strip().split(" ")
        img = RCNN.load_img(tmp[0])
        if img is None:
            continue

        # img_lbl: [r,g,b,region], regions{'rect'[left, bottom, w, h], 'size', 'labels'}
        img_lbl, regions = selectivesearch.selective_search(img,
                                                            sigma=0.9,
                                                            min_size=10)
        candidates = set()  # candidate boxes from selective search

        for r in regions:
            if r['rect'] in candidates:
                continue
            if r['rect'][2] <= 0 or r['rect'][3] <= 0:
                continue
            if r['size'] < 224:
                continue
            candidates.add(r['rect'])

            proposal_img, proposal_vertice = clip_img(img, r['rect'])
            try:
                img_array = RCNN.resize_img(proposal_img, (224, 224))
            except ValueError:  # counted below; resize can fail on degenerate crops
                value_error += 1
                continue
            images.append(img_array)

            ref_rect = tmp[2].split(',')  # ground-truth box
            ref_rect_int = [int(x) for x in ref_rect]
            ref_rect_int = [
                ref_rect_int[0], ref_rect_int[1],
                ref_rect_int[0] + ref_rect_int[2],
                ref_rect_int[1] + ref_rect_int[3]
            ]
            iou_value = iou(ref_rect_int, proposal_vertice)

            idx = int(tmp[1])  # class id

            # For SVM classification the label is an int; for fine-tuning it is one-hot
            if not svm:
                label = np.zeros(num_class + 1)
                if iou_value < threshold:  # below the threshold counts as background
                    label[0] = 1
                else:
                    label[idx] = 1
                labels.append(label)
            else:
                if iou_value < threshold:
                    labels.append(0)
                else:
                    labels.append(idx)
        i += 1
        t3 = time.time() - t2
        print('{} image(s) proposed, cost {}s'.format(i, t3))
    t4 = time.time() - t1
    print('\ncount of value error is {}, total cost {}s'.format(
        value_error, t4))

    if save:
        try:
            np.savez(save_path, params=[images, labels])
        except Exception as e:
            print('np save error: {}'.format(e))
        else:
            print('[*]save SUCCESS!')
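
A hedged usage sketch: build the dataset and read it back (the txt path mirrors Example #16). np.savez stores the ragged [images, labels] pair as a pickled object array, so allow_pickle=True is needed on load; newer NumPy may also require np.array([images, labels], dtype=object) before saving:

load_train_proposals('source/subrefine_list.txt', 2,
                     save=True, save_path='dataset.npz')

data = np.load('dataset.npz', allow_pickle=True)
images, labels = data['params']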