Ejemplo n.º 1
0
def load_mydata(CONSTRUCTION_METHOD, NUMBER_OF_CLASSES, csv_filepath,
                data_filepath):
    """Load the three-channel dataset and split it into train/test sets.

    Depending on CONSTRUCTION_METHOD, the per-channel data is read either
    as images or as raw (non-image) arrays.  Labels are one-hot encoded
    with NUMBER_OF_CLASSES classes and the composite tensor is split 70/30.

    Returns:
        ((x_train, y_train), (x_test, y_test)) tuples.
    """
    if CONSTRUCTION_METHOD == 'image':
        train_df, input_shape = data_from_csv(csv_filepath, data_filepath)
        print('Create Tensor Data...')
        reader = read_images
    else:
        train_df, input_shape = data_from_csv_nonimage(csv_filepath,
                                                       data_filepath)
        reader = read_data

    # Read the three channels with whichever reader the branch selected.
    channels = [reader(train_df[col].values, input_shape)
                for col in ('channelb', 'channelc', 'channeld')]

    # One-hot encode the class labels.
    labels = to_categorical(train_df.object.values, NUMBER_OF_CLASSES)

    # Stack the channels along a new trailing axis.
    x_train_comp = np.stack(channels, axis=4)

    x_train, x_test, y_train, y_test = train_test_split(
        x_train_comp, labels, test_size=0.3, random_state=None)

    return (x_train, y_train), (x_test, y_test)
Ejemplo n.º 2
0
def test(model, attack_func, method_name):
    """Run ``attack_func`` against every image, storing adversarials.

    When the module-level flag ``test_model_acc`` is set, only clean-model
    accuracy over the 200 images is measured instead.  For each image a
    deterministic target class is drawn (seeded by the label), the attack
    is run, the result is stored, and its L2 distance is reported.
    """
    # Start from a clean results directory.
    if os.path.exists("results"):
        shutil.rmtree("results")
    os.mkdir("results")

    acc = 0
    for file_name, image, label in read_images():
        # Idiom fix: compare the flag by truthiness, not `== True`.
        if test_model_acc:
            acc += np.argmax(model.predictions(image)) == label
            continue
        print(file_name, end="\t\t")

        # Seed per image so the target class is reproducible.
        np.random.seed(label + 2)
        target_class = int(np.random.random() * 200)
        adversarial = attack_func(model, image, target_class)
        store_adversarial(file_name, adversarial)

        if adversarial is None:
            print("can't find")
        elif np.argmax(model.predictions(adversarial)) == target_class:
            print("l2: %.4f" % np.linalg.norm(image / 255 - adversarial / 255))
        else:
            print("error")
            exit()

    if test_model_acc:
        print("model accuracy:  %.4f" % (acc / 200))
        exit()

    print("\n", method_name, "\n")
    compute_MAD()
Ejemplo n.º 3
0
def generate(files, steering, batch_size, augment_data=True):
    """Infinite batch generator over image ``files`` and ``steering`` targets.

    Yields (batch_x, batch_y) float32 pairs.  The whole dataset is
    reshuffled whenever an epoch wraps around.  With ``augment_data`` True
    the batch comes from utils.augment_dataset_single; otherwise the images
    are just read from disk.  All batches are preprocessed (rescale/resize).
    """
    #data = shuffle(data)
    num_examples = len(files)
    # offset starts at num_examples so the first pass through the while
    # loop always takes the shuffle branch and defines files_s/steering_s.
    offset = num_examples
    i = 1
    while True:
        if (offset + batch_size) >= num_examples:
            offset = 0
            i = 1
            # Reshuffle files and labels together at the start of an epoch.
            files_s, steering_s = shuffle(files, steering)
        for offset in range(0, num_examples, batch_size):
            i += 1
            end = offset + batch_size
            if end >= num_examples:
                end = num_examples  # clamp the final, possibly short, batch
            filename_x, batch_y = files_s[offset:end], steering_s[offset:end]
            if augment_data:
                batch_x, batch_y = utils.augment_dataset_single(
                    filename_x, batch_y)
            else:
                batch_x = utils.read_images(filename_x)
            # Rescale and resize only
            batch_x = utils.preprocess_images(batch_x, False)

            yield batch_x.astype('float32'), batch_y.astype('float32')
Ejemplo n.º 4
0
def create_forest():
    """ Create new random decision forest
    """
    print('Creating random forest')
    images = read_images()

    # Split the (class, path) pairs into labels and feature vectors.
    image_classes = [cls for cls, _ in images]
    feature_vectors = [get_feature_vector(img_path) for _, img_path in images]

    assert len(feature_vectors) > 0, 'Must have at least one feature vector to train on'

    # sqrt(#features) is a good estimator count for classification tasks.
    sqrt_feat_num = int(np.sqrt(len(feature_vectors[0])))
    plural = '' if sqrt_feat_num == 1 else 's'
    print(' ', sqrt_feat_num, 'estimator%s' % plural)

    # NOTE(review): min_samples_split=1 is rejected by modern scikit-learn
    # (must be an int >= 2 or a float); kept as-is to preserve behavior on
    # the sklearn version this was written for — confirm before upgrading.
    clf = RandomForestClassifier(
        n_estimators=sqrt_feat_num, n_jobs=-1,
        max_depth=None, min_samples_split=1
    )
    return clf.fit(feature_vectors, image_classes)
Ejemplo n.º 5
0
def test(thetas):
    """Report training and test accuracy of the fitted classifiers.

    (Python 2 — uses print statements.)  Evaluates ``thetas`` via the
    module-level ``_test`` helper on the first M_TRAINING examples, then on
    the next M_TESTING examples of the same image/label files.
    """
    size_img, rows, cols, images = utils.read_images(
        TRAINING_IMAGES_PATH
    )
    size_lbl, labels = utils.read_labels(TRAINING_LABELS_PATH)

    # Prepend a bias column of ones.
    bias_terms = np.ones([size_img, 1])
    images = np.concatenate((bias_terms, images), axis=1)

    training_images = images[range(0, M_TRAINING), :]
    training_labels = labels[range(0, M_TRAINING), :]

    # Scale pixel values to [0, 1].
    X = training_images / 255
    y = training_labels
    accuracy = _test(thetas, y, X, M_TRAINING)

    print 'Trained with {0} examples. Training accuracy: {1}'.format(
        M_TRAINING,
        accuracy
    )

    test_labels = labels[range(M_TRAINING, M_TRAINING + M_TESTING), :]
    test_images = images[range(M_TRAINING, M_TRAINING + M_TESTING), :]

    X = test_images / 255
    y = test_labels
    accuracy = _test(thetas, y, X, M_TESTING)

    print 'Tested with {0} examples. Test accuracy: {1}'.format(M_TESTING,
                                                                accuracy)
Ejemplo n.º 6
0
def train(alpha, n_iter, plot=False):
    """Fit one-vs-all logistic classifiers via gradient descent.

    Trains N_LABELS binary classifiers on the first M_TRAINING examples
    with learning rate ``alpha`` for ``n_iter`` iterations each, optionally
    plotting the per-classifier cost curve.  Returns the theta matrix.
    """
    size_img, rows, cols, images = utils.read_images(
        TRAINING_IMAGES_PATH
    )
    size_lbl, labels = utils.read_labels(TRAINING_LABELS_PATH)

    # Keep only the first M_TRAINING examples.
    images = images[range(0, M_TRAINING), :]
    labels = labels[range(0, M_TRAINING), :]
    size_img = M_TRAINING

    # Prepend a bias column and scale pixels to [0, 1].
    bias_terms = np.ones([size_img, 1], dtype=np.float64)
    images = np.concatenate((bias_terms, images), axis=1).astype(np.float64)
    X = images / 255

    thetas = np.zeros([rows * cols + 1, N_LABELS], dtype=np.float64)
    costs = np.zeros([n_iter, N_LABELS])
    for digit in range(N_LABELS):
        # Binary targets for the current one-vs-all classifier.
        y = np.array([[1 if label == digit else 0 for label in labels]]).T
        col = slice(digit, digit + 1)
        thetas[:, col], costs[:, col] = func.gradient_descent(
            thetas[:, col], y, X, alpha, n_iter
        )
        if plot:
            plt.plot(costs[:, col])
            plt.show()
    return thetas
Ejemplo n.º 7
0
def process(input_path, destination_path, id_delta=0):
    """Read, convert and re-save all images, shifting ids by ``id_delta``.

    (Python 2 — uses print statements.)
    """
    print "processing ", input_path
    images = utils.read_images(input_path)
    print len(images), " images read"
    new_images = utils.convert_all_files(images)
    # Shift every image id so batches from different folders don't collide.
    updated_list = [(id + id_delta, image) for id, image in new_images]
    print "saving"
    utils.save_images_width_id(updated_list, destination_path)
Ejemplo n.º 8
0
def read_action(action_path):
    """Build an Action (category, person, frames) from a directory path."""
    # Derive category and person from the directory's action name.
    name = get_action_name(action_path)
    category = get_category(name)
    person = get_person(name)
    # Collect the frame files and read them as images.
    frame_files = utils.get_files(action_path)
    frame_files = utils.append_path(action_path + "/", frame_files)
    return Action(category, person, utils.read_images(frame_files))
Ejemplo n.º 9
0
def process(input_path, destination_path, id_delta=0):
    """Read, convert and re-save all images, shifting ids by ``id_delta``.

    (Python 2 — uses print statements.)
    """
    print "processing ", input_path
    images = utils.read_images(input_path)
    print len(images), " images read"
    new_images = utils.convert_all_files(images)
    # Shift every image id so batches from different folders don't collide.
    updated_list = [(id+id_delta, image) for id, image in new_images]
    print "saving"
    utils.save_images_width_id(updated_list, destination_path)
Ejemplo n.º 10
0
def train_model(unique_labels, file_dir, num_samples, le_file_path, num_epochs, model_file_path):
    """Build, fit and persist a model over images found in ``file_dir``.

    Samples ``num_samples`` training files, reads images/labels using the
    label-encoder file, trains for ``num_epochs`` and saves the model.
    """
    model = create_model(unique_labels)
    training_files = utils.get_file_list(file_dir, samples=num_samples, training=True)
    logger.info('Beginning training model')
    x, y = utils.read_images(training_files, le_file_path, training=True)
    model.fit(x, y, epochs=num_epochs)
    logger.info('Finished fitting model')
    logger.info(f'Saving model to {model_file_path}')
    model.save(model_file_path)
Ejemplo n.º 11
0
def main():
    """Fetch imagery for every (state, region) cluster not yet downloaded."""
    data = read_df(data_dir/'sentinel_main.csv')
    image_list = read_images(image_dir)

    for (state, region), group in data.groupby(['state', 'region']):
        with ThreadPoolExecutor(max_workers=5) as executor:
            # Only process clusters whose imagery is not already on disk.
            rows = []
            for _, row in group.iterrows():
                if str(row['cluster']) not in image_list:
                    rows.append((Coordinate(row['lat'], row['lng']), row['cluster']))

            results = list(tqdm(executor.map(process, rows), desc=f'State: {state}, Region: {region}', total=len(rows)))
Ejemplo n.º 12
0
def test_processing():
    """Round-trip check: read, convert and save all images (Python 2)."""
    # load images
    image_list = utils.read_images('/Users/matti/Documents/forritun/andlit2/')
    print len(image_list)
    # pre_ids, pre_images = zip(*image_list)
    processed_images = utils.convert_all_files(image_list)
    print len(processed_images)
    # ids, images = zip(*processed_images)
    # print " pre: ", pre_ids
    # print "post: ", ids
    # utils.save_images(images)
    utils.save_images_width_id(processed_images)
Ejemplo n.º 13
0
Archivo: test.py Proyecto: orvitinn/msc
def test_processing():
    """Round-trip check: read, convert and save all images (Python 2)."""
    # load images
    image_list = utils.read_images('/Users/matti/Documents/forritun/andlit2/')
    print len(image_list)
    # pre_ids, pre_images = zip(*image_list)
    processed_images = utils.convert_all_files(image_list)
    print len(processed_images)
    # ids, images = zip(*processed_images)
    # print " pre: ", pre_ids
    # print "post: ", ids
    # utils.save_images(images)
    utils.save_images_width_id(processed_images)
Ejemplo n.º 14
0
def predict_images(model_file_path, predict_file_dir, le_file_path):
    """Run the saved model over all images in a directory and log results.

    Loads the Keras model and the pickled label encoder, predicts a class
    for every file found in ``predict_file_dir`` and logs one line per file.
    """
    import os.path  # local import: used for portable filename extraction

    model = tf.keras.models.load_model(model_file_path)
    file_list = utils.get_file_list(predict_file_dir)
    images, _ = utils.read_images(file_list, le_file_path)
    with open(le_file_path, 'rb') as f:
        le = pickle.load(f)
    predictions = model.predict(images)
    # Map each prediction vector back to its original class label.
    predicted_classes = le.inverse_transform(
        [np.argmax(prediction) for prediction in predictions])
    for file_path, predicted_class in zip(file_list, predicted_classes):
        # BUG FIX: the original split on '\\', which only works for
        # Windows-style paths; os.path.basename is portable.
        file_name = os.path.basename(file_path)
        logger.info(f'File: {file_name} | Predicted Class: {predicted_class}')
Ejemplo n.º 15
0
 def load_data(self, train_p_data_dir, train_n_data_dir):
     """Read positive/negative samples and split into train/valid/test.

     Holds out 20% of each class as the test set, then splits the
     remainder 70/30 into training and validation sets.  Training data is
     doubled with vertical flips.  Populates the
     self.{train,valid,test}_{p,n}_data lists and the *_num counters.
     """
     p_data = read_images(train_p_data_dir,
                          self.image_size,
                          normalize=True,
                          limit=5000)
     n_data = read_images(train_n_data_dir,
                          self.image_size,
                          normalize=True,
                          limit=10000)
     random.shuffle(p_data)
     random.shuffle(n_data)
     # split off the test set (20% of each class)
     split_p = int(len(p_data) * 0.2)
     split_n = int(len(n_data) * 0.2)
     self.test_p_data = p_data[:split_p]
     self.test_n_data = n_data[:split_n]
     p_data = p_data[split_p:]
     n_data = n_data[split_n:]
     # of what remains, 70% trains and 30% validates
     split_p = int(len(p_data) * 0.7)
     split_n = int(len(n_data) * 0.7)
     self.train_p_data = p_data[:split_p]
     self.train_n_data = n_data[:split_n]
     self.valid_p_data = p_data[split_p:]
     self.valid_n_data = n_data[split_n:]
     self.train_p_num, self.train_n_num = len(self.train_p_data), len(
         self.train_n_data)
     self.valid_p_num, self.valid_n_num = len(self.valid_p_data), len(
         self.valid_n_data)
     # augment the training data with vertical flips
     self.train_p_data += list(map(np.flipud, self.train_p_data))
     self.train_n_data += list(map(np.flipud, self.train_n_data))
     #self.train_n_data = self.train_n_data[:len(self.train_p_data)]
     print("train data size: ", "p:", len(self.train_p_data), "n:",
           len(self.train_n_data))
     print("valid data size: ", "p:", len(self.valid_p_data), "n:",
           len(self.valid_n_data))
     print("test data size: ", "p:", len(self.test_p_data), "n:",
           len(self.test_n_data))
Ejemplo n.º 16
0
def load_data(path, batch_size=25):
    """Read, flatten and normalize all images under ``path``, batched.

    Returns an ndarray of ``batch_size``-sized batches of normalized,
    flattened images.
    """
    all_files = utils.get_all_files(path)
    all_files = utils.append_path(path, all_files)
    images = utils.read_images(all_files)
    images = utils.flatten_images(images)
    # BUG FIX: on Python 3, map() is lazy and np.array(map_obj) silently
    # produces a 0-d object array; materialize with list() first.
    images = np.array(list(map(utils.normalize, images)))
    n_batches = get_number_of_batches(batch_size, len(images))

    def get_batch(i):
        # i-th contiguous slice of batch_size images
        return images[i * batch_size: (i + 1) * batch_size]

    batches = [np.array(get_batch(i)) for i in range(n_batches)]
    print("Dataset loaded")
    return np.array(batches)
# Per-channel normalization constants (alternative values kept for reference).
#mean = [0.5, 0.5, 0.5]
#std=[1, 1, 1]

# mean from dataset058:
mean = [0.3823625683879477, 0.3790166856065496, 0.3554138533338805]
std=[0.21754145353302254, 0.21271749678359336, 0.21233947166469555]

#mean = [0.383661700858527, 0.3819784115384924, 0.3588786631614881]
#std=[0.2167717755518767, 0.21201058526724945, 0.21143164036556178]

# Load the dataset
from utils import read_images_stl10 as read_images
from utils import read_labels_stl10 as read_labels

#unlab_set_x = read_images('../data/stl10_binary/unlabeled_X.bin')
test_set_x = read_images('../../data/stl10_binary/test_X.bin')
train_set_x = read_images('../../data/stl10_binary/train_X.bin')

test_set_y = read_labels('../../data/stl10_binary/test_y.bin')
train_set_y = read_labels('../../data/stl10_binary/train_y.bin')

# Sanity prints (Python 2 syntax).
print 'Train set information: '
print (len(train_set_x), type(train_set_x[3]))
print ''
print 'Test set information: '
print (len(test_set_x), type(test_set_x[3]))
print ''

# loading the folds: one line of training indices per predefined STL-10 fold
with open('../../data/stl10_binary/fold_indices.txt', 'r') as f_folds:
    folds = f_folds.readlines()
Ejemplo n.º 18
0
Archivo: test.py Proyecto: orvitinn/msc
def test_detection():
    """Smoke-test utils.FaceDetector on a directory of face images."""
    image_list = utils.read_images('/Users/matti/Documents/forritun/andlit2/s18')
    fd = utils.FaceDetector()
    # BUG FIX: the original unpacked the undefined name `processed_images`
    # (NameError); the freshly read `image_list` of (id, image) pairs is
    # what is available here.
    ids, images = zip(*image_list)
Ejemplo n.º 19
0
# Training setup for LeNet on MNIST (NumPy implementation).
from optim import SGD
from utils import read_images, read_labels
from model import LeNet

parser = argparse.ArgumentParser(
    description='Training LeNet on MNIST using NumPy.')
parser.add_argument('data_dir', type=str, help='directory to mnist data')

args = parser.parse_args()

# hyper-parameters
epoch = 10
lr = 0.1
momentum = 0.8
batch = 256

# MNIST idx files: training and test images/labels.
train_data = read_images(os.path.join(args.data_dir,
                                      'train-images.idx3-ubyte'))
train_labels = read_labels(
    os.path.join(args.data_dir, 'train-labels.idx1-ubyte'))
test_data = read_images(os.path.join(args.data_dir, 't10k-images.idx3-ubyte'))
test_labels = read_labels(os.path.join(args.data_dir,
                                       't10k-labels.idx1-ubyte'))

# normalize: zero mean / unit variance per image
train_data = (train_data - train_data.mean(
    (1, 2), keepdims=True)) / train_data.std((1, 2), keepdims=True)
test_data = (test_data - test_data.mean(
    (1, 2), keepdims=True)) / test_data.std((1, 2), keepdims=True)

my_net = LeNet()
optimizer = SGD(my_net.parameters(), lr, momentum)
Ejemplo n.º 20
0
    # Script body (Python 2): load face images from a default or CLI path.
    print "starting...."

    # This is where we write the images, if an output_dir is given
    # in command line:
    out_dir = None
    # You'll need at least a path to your image data, please see
    # the tutorial coming with this source code on how to prepare
    # your image data:
    path = '/Users/matti/Documents/forritun/att_faces/'
    if len(sys.argv) > 1:
        path = sys.argv[1]

    print "reading images from " + path
    # Now read in the image data. This must be a valid path!
    # [X, y] = read_images(path)
    input_faces = utils.read_images(path)

    # use a random image
    # random.seed()
    # r = len(X)
    # random_index = random.randint(0, r-1)

    # print "using image ", random_index, " id: ", y[random_index]

    # placeholder for the probe image (selection code below is disabled)
    prufu_mynd = None

    # test data and label
    # prufu_mynd, tl = X[random_index], y[random_index]
    # and remove test from the data
    # del X[random_index]
    # del y[random_index]
0
def test_detection():
    """Smoke-test utils.FaceDetector on a directory of face images."""
    image_list = utils.read_images(
        '/Users/matti/Documents/forritun/andlit2/s18')
    fd = utils.FaceDetector()
    # BUG FIX: the original unpacked the undefined name `processed_images`
    # (NameError); the freshly read `image_list` of (id, image) pairs is
    # what is available here.
    ids, images = zip(*image_list)
Ejemplo n.º 22
0
def train(FLAG):
    """Train a 7-class VGG16 segmentation network with early stopping.

    Reads images/masks from TRAIN_DIR and VAL_DIR, optimizes with Adam
    (or SGD+Nesterov momentum when opt_type is 'sgd'), checkpoints to
    FLAG.save_dir whenever validation loss improves by more than min_delta,
    and stops after early_stop_patience epochs without improvement.  Every
    10 epochs, predictions for a fixed set of tracked validation samples
    are written out as PNGs.
    """
    print("Reading dataset...")
    # load data
    Xtrain, Ytrain = read_images(TRAIN_DIR), read_masks(TRAIN_DIR, onehot=True)
    Xtest, Ytest = read_images(VAL_DIR), read_masks(VAL_DIR, onehot=True)
    # fixed validation samples whose predictions are dumped every 10 epochs
    track = [
        "hw3-train-validation/validation/0008",
        "hw3-train-validation/validation/0097",
        "hw3-train-validation/validation/0107"
    ]
    Xtrack, Ytrack = read_list(track)

    vgg16 = VGG16(classes=7, shape=(256, 256, 3))
    vgg16.build(vgg16_npy_path=FLAG.init_from,
                mode=FLAG.mode,
                keep_prob=FLAG.keep_prob)

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
    checkpoint_path = os.path.join(FLAG.save_dir, 'model.ckpt')

    def initialize_uninitialized(sess):
        # Initialize only variables that are still uninitialized so that
        # weights built/restored earlier are not clobbered.
        global_vars = tf.global_variables()
        is_not_initialized = sess.run(
            [tf.is_variable_initialized(var) for var in global_vars])
        not_initialized_vars = [
            v for (v, f) in zip(global_vars, is_not_initialized) if not f
        ]
        if len(not_initialized_vars):
            sess.run(tf.variables_initializer(not_initialized_vars))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # hyper parameters
        batch_size = 32
        epoch = 500
        early_stop_patience = 50
        min_delta = 0.0001
        opt_type = 'adam'

        # recorder
        epoch_counter = 0

        # optimizer
        global_step = tf.Variable(0, trainable=False)

        # Passing global_step to minimize() will increment it at each step.
        # BUG FIX: was `opt_type is 'sgd'` — `is` tests object identity,
        # which is unreliable for string comparison; use `==`.
        if opt_type == 'sgd':
            start_learning_rate = FLAG.lr
            half_cycle = 2000
            learning_rate = tf.train.exponential_decay(start_learning_rate,
                                                       global_step,
                                                       half_cycle,
                                                       0.5,
                                                       staircase=True)
            opt = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                             momentum=0.9,
                                             use_nesterov=True)
        else:
            start_learning_rate = FLAG.lr
            half_cycle = 2000
            learning_rate = tf.train.exponential_decay(start_learning_rate,
                                                       global_step,
                                                       half_cycle,
                                                       0.5,
                                                       staircase=True)
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate)

        obj = vgg16.loss
        train_op = opt.minimize(obj, global_step=global_step)

        # progress bar
        ptrain = IntProgress()
        pval = IntProgress()
        display(ptrain)
        display(pval)
        ptrain.max = int(Xtrain.shape[0] / batch_size)
        pval.max = int(Xtest.shape[0] / batch_size)

        # re-initialize
        initialize_uninitialized(sess)

        # reset due to adding a new task
        patience_counter = 0
        # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
        # accepts 'inf' directly.
        current_best_val_loss = float('inf')

        # optimize when the aggregated obj
        while (patience_counter < early_stop_patience
               and epoch_counter < epoch):

            # start training
            stime = time.time()
            bar_train = Bar(
                'Training',
                max=int(Xtrain.shape[0] / batch_size),
                suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds')
            bar_val = Bar(
                'Validation',
                max=int(Xtest.shape[0] / batch_size),
                suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds')

            train_loss, train_accu = 0.0, 0.0
            for i in range(int(Xtrain.shape[0] / batch_size)):
                st = i * batch_size
                ed = (i + 1) * batch_size
                loss, accu, _ = sess.run(
                    [obj, vgg16.accuracy, train_op],
                    feed_dict={
                        vgg16.x: Xtrain[st:ed, :],
                        vgg16.y: Ytrain[st:ed, :],
                        vgg16.is_train: True
                    })
                train_loss += loss
                train_accu += accu
                ptrain.value += 1
                ptrain.description = "Training %s/%s" % (ptrain.value,
                                                         ptrain.max)
            train_loss = train_loss / ptrain.value
            train_accu = train_accu / ptrain.value

            # validation
            val_loss = 0
            val_accu = 0
            for i in range(int(Xtest.shape[0] / batch_size)):
                st = i * batch_size
                ed = (i + 1) * batch_size
                loss, accu = sess.run(
                    [obj, vgg16.accuracy],
                    feed_dict={
                        vgg16.x: Xtest[st:ed, :],
                        vgg16.y: Ytest[st:ed, :],
                        vgg16.is_train: False
                    })
                val_loss += loss
                val_accu += accu
                pval.value += 1
                # BUG FIX: the caption showed value/value; mirror the
                # training bar and show value/max instead.
                pval.description = "Testing %s/%s" % (pval.value, pval.max)
            val_loss = val_loss / pval.value
            val_accu = val_accu / pval.value

            # dump tracked-sample predictions every 10 epochs
            if epoch_counter % 10 == 0:
                Xplot = sess.run(vgg16.pred,
                                 feed_dict={
                                     vgg16.x: Xtrack[:, :],
                                     vgg16.y: Ytrack[:, :],
                                     vgg16.is_train: False
                                 })

                for i, fname in enumerate(track):
                    # order=0 (nearest neighbour) keeps class ids intact
                    saveimg = skimage.transform.resize(Xplot[i],
                                                       output_shape=(512, 512),
                                                       order=0,
                                                       preserve_range=True,
                                                       clip=False)
                    saveimg = label2rgb(saveimg)
                    imageio.imwrite(
                        os.path.join(
                            FLAG.save_dir,
                            os.path.basename(fname) + "_pred_" +
                            str(epoch_counter) + ".png"), saveimg)
                    print(
                        os.path.join(
                            FLAG.save_dir,
                            os.path.basename(fname) + "_pred_" +
                            str(epoch_counter) + ".png"))

            # early stopping check
            if (current_best_val_loss - val_loss) > min_delta:
                current_best_val_loss = val_loss
                patience_counter = 0
                saver.save(sess, checkpoint_path, global_step=epoch_counter)
                print("save in %s" % checkpoint_path)
            else:
                patience_counter += 1

            # shuffle Xtrain and Ytrain in the next epoch
            idx = np.random.permutation(Xtrain.shape[0])
            Xtrain, Ytrain = Xtrain[idx, :, :, :], Ytrain[idx, :]

            # epoch end
            epoch_counter += 1

            ptrain.value = 0
            pval.value = 0
            bar_train.finish()
            bar_val.finish()

            print(
                "Epoch %s (%s), %s sec >> train loss: %.4f, train accu: %.4f, val loss: %.4f, val accu: %.4f"
                % (epoch_counter, patience_counter,
                   round(time.time() - stime,
                         2), train_loss, train_accu, val_loss, val_accu))
Ejemplo n.º 23
0
t_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(t_vars, print_info=True)

# Separate Adam optimizers for discriminator and generator, each gated on
# its own scope's batch-norm UPDATE_OPS so moving averages stay current.
with tf.control_dependencies(
        tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='discriminator')):
    train_D = tf.train.AdamOptimizer(0.0002,
                                     beta1=0.5).minimize(model.loss_D,
                                                         var_list=model.vars_D)
with tf.control_dependencies(
        tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')):
    train_G = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(
        model.loss_G, global_step=model.global_step, var_list=model.vars_G)

sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

images, labels = read_images("data", "folder")
num_iters = len(images) // config.BATCH_SIZE

# Fixed noise batch (length x length samples) reused for periodic previews.
cnt = 0
length = 5
sample_noise = np.random.uniform(
    -1., 1., size=[length * length, 1, 1, config.LATENT_DIM])

with tf.Session(config=sess_config) as sess:
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)
    #summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    sess.run(init)

    model_checkpoint_name = config.PATH_CHECKPOINT + "/model.ckpt"
    if config.IS_CONTINUE:
Ejemplo n.º 24
0
                else:
                    image_patch = img[i:(i + template_h), j:(j + template_w)]
                    image_template = template

                results[method][i, j] = value['func'](image_patch, image_template)

    # for each result get max(or min) and plot corresponding matching result
    for method, res in results.items():
        im_copy = img.copy()
        if method != 'Normalized Cross-Correlation':
            top_left = np.unravel_index(res.argmin(), res.shape)[::-1]
        else:
            top_left = np.unravel_index(res.argmax(), res.shape)[::-1]

        bottom_right = (top_left[0] + template_w, top_left[1] + template_h)
        cv2.rectangle(im_copy, top_left, bottom_right, 255, 2)
        print_matching_result(im_copy, res, method)


if __name__ == '__main__':
    # Compare template-matching metrics on a sample image/template pair.
    image, template = read_images('0001.jpg', '0001_template.jpg', DATA_DIR)
    opencv_methods = ['cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF']
    # Each entry: the metric function plus whether it is a normalized form.
    custom_methods = {
        'Sum of Absolute Distances': {'func': sad, 'normalized': False},
        'Normalized Sum of Absolute Distances': {'func': sad, 'normalized': True},
        'Sum of Square Distances': {'func': ssd, 'normalized': False},
        'Normalized Cross-Correlation': {'func': ncc, 'normalized': True}
    }
    # template_matching_opencv(image, template, opencv_methods)
    template_matching_numpy(image, template, custom_methods)
Ejemplo n.º 25
0
# CLI: -i/--input points at a folder with images/ and Calibration.xml;
# --rerun reuses the point grid cached by the first run.
requiredArgs = parser.add_argument_group('required arguments')
requiredArgs.add_argument(
    "-i",
    "--input",
    help="Folder containing an images/ folder and Calibration.xml",
    required=True)
parser.add_argument(
    "--rerun",
    action='store_true',
    help=
    "Use this flag if this is the second run of the script, first run stores the point grid visible which is used from that point on"
)

args = parser.parse_args()
calibrations = read_calibration(os.path.join(args.input, "Calibration.xml"))
images = read_images(os.path.join(args.input, "images"))
if not args.rerun:
    # First run: build and filter the visible point grid, then cache it.
    grid = generate_grid()
    grid = filter_grid(grid, calibrations)
    np.save('grid.npy', grid)
else:
    grid = np.load('grid.npy')
draw_points_on_images(grid, calibrations, images)

# 3D plot setup; one fixed color per calibration below.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = [
    '#808080', '#000000', '#FF0000', '#800000', '#808000', '#008000',
    '#00FFFF', '#008080', '#800080'
]
for i, calibration in enumerate(calibrations):
Ejemplo n.º 26
0
"""
SAA - self adaptive algorithm
FAS - fast adaptive similarity filter
Images from https://pixabay.com/images/search/
"""
import time

import utils
import constants
import NEAVF

start = time.time()

# Read, resize and plot images
size = (300, 400)
img_list = utils.read_images(constants.DATASET_PATH)
img_list = utils.resize_img(img_list, size)
# utils.show_image_list(img_list, "Initial images!")

# Build a gaussian-noise and a salt-and-pepper copy of every image (10%).
img_list_gauss = []
img_list_impulsive = []
index = 0  # NOTE(review): incremented but never read below
for img in img_list:
    img_list_gauss.append(utils.gaussian_noise(img, 0.1))
    img_list_impulsive.append(utils.salt_and_pepper(img, 0.1))
    index += 1
# utils.show_image_list(img_list_gauss, "Images with gaussian noise! 0.1")
# utils.show_image_list(img_list_impulsive, "Image with salt and pepper noise! 0.1")

# Two images with 2 values of noise
img_list_gauss_005 = []
Ejemplo n.º 27
0
config.MODE = "train"

model = GANnomaly(config)

t_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(t_vars, print_info=True)

# Separate Adam optimizers: discriminator updates gate on its own scope's
# batch-norm ops; generator updates gate on generator + encoder ops.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='discriminator')):
    train_D = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(model.loss_D, var_list=model.vars_D)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator') + tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='encoder')):
    train_G = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(model.loss_G, global_step=model.global_step, var_list=model.vars_G)


sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

# Training and test sets read up front; iterations per epoch from batch size.
images, labels = read_images(config.PATH_DATA, "folder")
num_iters = len(images) // config.BATCH_SIZE

test_images, test_labels = read_images(config.PATH_TEST, "folder")
test_num_iters = len(test_images) // config.BATCH_SIZE

# Counters and accumulators used by the (truncated) training loop below.
cnt = 0
length = 6
best_auc = 0

scores_out = []
labels_out = []
with tf.Session(config=sess_config) as sess:
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=10000)
    init = tf.global_variables_initializer()
Ejemplo n.º 28
0
def run_experiment(params, dropout, bn):
    """Train, test and adversarially probe two CNNs with one parameter set.

    Builds a network, trains and evaluates it, generates adversarial images
    that morph a sample of class ``params['real_label']`` toward
    ``params['chosen_label']``, then trains a second network and compares
    both networks' predictions on the adversarial images side by side.

    Args:
        params: dict of experiment settings (img_size, num_channels,
            num_classes, layer shapes, learning_rate, data, directories,
            real_label, chosen_label, ...).
        dropout: bool; if True, apply dropout with rate params['dropout'].
        bn: bool; if True, enable batch normalization.

    Returns:
        Tuple (train_acc, cost, test_acc, figname_suffix, acc) for the
        first network.
    """
    tf.reset_default_graph()

    figname_suffix = "a_"

    dropout_value = 1.0
    if dropout:
        dropout_value = params['dropout']
        figname_suffix += "dropout"
    if bn:
        figname_suffix += "bn"

    # Bind frequently used params to locals.  The original referenced the
    # bare names plot_dir / chosen_label / real_label further down without
    # ever defining them, which raised NameError at runtime.
    plot_dir = params['plot_dir']
    chosen_label = params['chosen_label']
    real_label = params['real_label']

    train_step, \
    cost, \
    accuracy, \
    y_pred, \
    y_pred_cls, \
    y_true_cls, \
    placeholders = create_network(
                                params['img_size'],
                                params['num_channels'],
                                params['num_classes'],
                                params['shape1'],
                                params['shape2'],
                                params['num_fc_layer1_output'],
                                params['num_fc_layer2_output'],
                                params['learning_rate'],
                                bn
                                )

    saver = tf.train.Saver()
    if not os.path.exists(params['save_dir']):
        os.makedirs(params['save_dir'])

    # Train and evaluate the first network.
    (train_acc, cost,
     test_acc) = train_network(params['data'], train_step, cost, accuracy,
                               params['num_iterations'],
                               params['train_batch_size'], dropout_value,
                               placeholders, saver, params['save_dir'],
                               plot_dir, params['log_dir'],
                               params['display_step'], figname_suffix)

    cls_true, cls_pred, acc, y_pred_probs, x_all, y_all\
        = test_network(params['test_batch_size'],
                       placeholders,
                       1.0,
                       saver,
                       params['save_dir'],
                       accuracy,
                       y_pred,
                       y_pred_cls,
                       y_true_cls,
                       params['data'])

    plot_confusion_matrix(cls_true=cls_true,
                          cls_pred=cls_pred,
                          output_dir=plot_dir,
                          fig_name="confusion_matrix" + "_" + figname_suffix)

    plot_images(output_dir=plot_dir,
                fig_name="images" + "_" + figname_suffix,
                img_shape=(params['img_size'], params['img_size'],
                           params['num_channels']),
                images=x_all,
                cls_true=y_all,
                cls_pred=cls_pred,
                prob_pred=y_pred_probs,
                logits=None,
                y_pred=None)

    # Pick a random test image of the "real" class and morph it toward the
    # chosen target class.
    y_all_label = list(map(lambda x: np.argmax(x), y_all))
    chosen = list(
        filter(lambda x: x[0] == real_label, zip(y_all_label, x_all)))
    # One-hot target vector; was hard-coded to np.zeros(10), which broke
    # for any dataset without exactly 10 classes.
    chosen_class = np.zeros(params['num_classes'])
    chosen_class[chosen_label] = 1
    # randint(len(chosen)) covers every candidate.  The original
    # randint(len(chosen) - 1) could never pick the last one and raised
    # ValueError when exactly one candidate existed.
    image_index = np.random.randint(len(chosen))
    x_image_list = create_plot_adversarial_images(
        x=placeholders['x'],
        keep_prob=placeholders['fc_layer1_keep_prob'],
        phase_train=placeholders['phase_train'],
        img_shape=(params['img_size'], params['img_size'],
                   params['num_channels']),
        x_image=chosen[image_index][1],
        y_conv=y_pred,
        y_label=chosen_class,
        lr=0.05,
        n_steps=12,
        saver=saver,
        output_dir=plot_dir,
        save_dir=params['save_dir'],
        fig_name=str(chosen_label) + "_" + str(real_label),
    )

    # create a second network, train and test both
    train_step2, \
    cost2, \
    accuracy2, \
    y_pred2, \
    y_pred_cls2, \
    y_true_cls2, \
    placeholders2 = create_network2(
                                params['img_size'],
                                params['num_channels'],
                                params['num_classes'],
                                params['shape1'],
                                params['shape2'],
                                params['num_fc_layer1_output'],
                                params['num_fc_layer2_output'],
                                params['learning_rate'],
                                bn
                                )

    (train_acc2, cost2,
     test_acc2) = train_network(params['data'], train_step2, cost2, accuracy2,
                                params['num_iterations'],
                                params['train_batch_size'], dropout_value,
                                placeholders2, saver, params['save_dir2'],
                                plot_dir, params['log_dir'],
                                params['display_step'], figname_suffix)
    # MNIST images are grayscale; other datasets assumed RGB.
    if params['dataset'] == 'mnist':
        gray = True
    else:
        gray = False
    image_list, img_shape = read_images(
        plot_dir + str(chosen_label) + "_" + str(real_label) + "/",
        gray)
    # Flatten each adversarial image to a 1-D vector for feeding the
    # networks.  (Was hard-coded to reshape(784), i.e. MNIST-only.)
    xx_image_list = []
    for img in x_image_list:
        xx_image_list += [img.reshape(-1)]
    result1 = test_adversial_images(y_pred, saver, params['save_dir'],
                                    placeholders['x'],
                                    placeholders['fc_layer1_keep_prob'],
                                    placeholders['phase_train'], xx_image_list,
                                    img_shape)
    result2 = test_adversial_images(y_pred2, saver, params['save_dir2'],
                                    placeholders2['x'],
                                    placeholders2['fc_layer1_keep_prob'],
                                    placeholders2['phase_train'],
                                    xx_image_list, img_shape)

    # Compare both networks' predictions on the adversarial images: each
    # step occupies two adjacent grid cells (network 1 | network 2).
    fig = plt.figure(figsize=(25, 35))
    plt.clf()
    rows = 6
    cols = 4
    for i in range(1, len(result1)):
        # The original reused position i for every pair (its trailing
        # `i += 2` was discarded by the for-loop rebinding), so all pairs
        # overwrote the same axes.  Use explicit pair positions instead.
        pos = 2 * (i - 1) + 1
        if pos + 1 > rows * cols:
            break  # subplot grid is full
        ax = fig.add_subplot(rows, cols, pos)
        ax.imshow(result1[i][2], cmap='binary')
        ax.set_title('Label1: {0} Prob1: {1}%'.format(result1[i][0],
                                                      result1[i][1]))

        ax = fig.add_subplot(rows, cols, pos + 1)
        ax.imshow(result2[i][2], cmap='binary')
        ax.set_title('Label2: {0} Prob2: {1}%'.format(result2[i][0],
                                                      result2[i][1]))

    plt.savefig(plot_dir + str(chosen_label) + "_" +
                str(real_label) + "/compare.png")

    return train_acc, cost, test_acc, figname_suffix, acc
Ejemplo n.º 29
0
# Build the DCGAN and force single-image batches for this run.
config = Config()
model = DCGAN(config)
config.BATCH_SIZE = 1

# Print a summary of all trainable variables (name, shape, size).
t_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(t_vars, print_info=True)

# Separate Adam optimizers for D and G; each minimize() is gated on the
# UPDATE_OPS of its own scope so batch-norm statistics update before the
# corresponding training step.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='discriminator')):
    train_D = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(model.loss_D, var_list=model.vars_D)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')):
    train_G = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(model.loss_G, global_step=model.global_step, var_list=model.vars_G)


# Grow GPU memory on demand instead of grabbing it all up front.
sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

# Load training images; iterate in whole batches (remainder dropped).
images, labels = read_images(config.PATH_DATA, "folder")
num_iters = len(images) // config.BATCH_SIZE

cnt = 0     # global step counter across epochs
length = 5  # sample-grid edge length (5x5 preview images)
# Fixed noise vectors reused every epoch so preview samples are comparable.
sample_noise = np.random.uniform(-1., 1., size=[length*length, 1, 1, config.LATENT_DIM])

# Directory for generated preview images.
img_path = "temp"
if not os.path.isdir("temp"):
    os.mkdir("temp")

with tf.Session(config=sess_config) as sess:
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)
    #summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    sess.run(init)
	# NOTE(review): this fragment uses tab indentation (unlike the rest of
	# the file) and its enclosing scope is not visible here.
	# set up detectors
	# Haar-cascade full-body detector; low scaleFactor/minNeighbors trade
	# precision for recall.
	detector = BodyDetector(cascade_fn="../data/people/body10/haarcascade_fullbody.xml", scaleFactor=1.05,
							minNeighbors=1)
	# detector = BodyDetector(cascade_fn="../data/people/body10/haarcascade_upperbody.xml")
	# detector = BodyDetector(cascade_fn="../data/people/body10/haarcascade_lowerbody.xml")

	# detector = BodyDetector(cascade_fn="/Users/Utilizador/opencv-3.0.0/data/haarcascades/haarcascade_fullbody.xml", scaleFactor=1.05,
	# 						minNeighbors=1)
	# detector = BodyDetector(cascade_fn="/Users/Utilizador/opencv-3.0.0/data/haarcascades/haarcascade_upperbody.xml")
	# detector = BodyDetector(cascade_fn="/Users/Utilizador/opencv-3.0.0/data/haarcascades/haarcascade_lowerbody.xml")


	# rect = detector.detect(imgOut)

	# Load the INRIA positive test set (names, images, labels); sz=None
	# keeps original image sizes.
	[Names, X, y] = read_images("../data/people/INRIAPerson/Test/pos",sz=None)
	# [Names, X, y] = sorted(([Names, X, y]), key=lambda image:X[0])
	imgBGR = []

	#for all images in list X
	for imgName, imgOut in zip(Names,X):

		# detection: draw a green rectangle around each detected body.
		# NOTE(review): imgBGR is only assigned inside this inner loop, so
		# cv2.imshow below fails if no detection occurred for an image.
		for i,r in enumerate(detector.detect(imgOut)):
			x0,y0,x1,y1 = r
			imgBGR = cv2.cvtColor(imgOut,cv2.COLOR_GRAY2BGR)
			cv2.rectangle(imgBGR, (x0,y0),(x1,y1),(0,255,0),1)


		# display image or write to file
		cv2.imshow("People detected: " + imgName, imgBGR)
Ejemplo n.º 31
0
    # NOTE(review): Python 2 fragment; its enclosing scope is outside view.
    print "starting...."

    # This is where we write the images, if an output_dir is given
    # in command line:
    out_dir = None
    # You'll need at least a path to your image data, please see
    # the tutorial coming with this source code on how to prepare
    # your image data:
    path = '/Users/matti/Documents/forritun/att_faces/'
    # First CLI argument overrides the default dataset path.
    if len(sys.argv) > 1:
        path = sys.argv[1]

    print "reading images from " + path
    # Now read in the image data. This must be a valid path!
    # [X, y] = read_images(path)
    input_faces = utils.read_images(path)

    # use a random image
    # random.seed()
    # r = len(X)
    # random_index = random.randint(0, r-1)

    # print "using image ", random_index, " id: ", y[random_index]

    # Placeholder for the held-out test image ("prufu_mynd" is Icelandic
    # for "test image").
    prufu_mynd = None

    # test data and label
    # prufu_mynd, tl = X[random_index], y[random_index]
    # and remove test from the data
    # del X[random_index]
    # del y[random_index]
Ejemplo n.º 32
0
# Experiment identifiers used to locate the matching data and model files.
run_id = '#1'  # renamed from `id`, which shadowed the builtin
TAG = '_6class_T2'
DIM = '10x10'
CONSTRUCTION_METHOD = 'stacked'  # 'image' or 'matrix' or 'stacked'
CSV_FILEPATH = 'data/test_labels' + TAG + '.txt'
DATA_FILEPATH = 'data/test/complexbaseband/' + DIM + TAG + '/'
NUMBER_OF_CLASSES = 6
WEIGHTS = 'CNN/models/weights_' + DIM + '_' + run_id + '.best.hdf5'
MODEL = 'CNN/models/model_' + DIM + '_' + run_id + '.json'
object_classes = ['noObject', 'circle', 'square', 'rect', 'Lrect', 'triangle']

# Load the test set in the representation matching CONSTRUCTION_METHOD.
# (The original also called data_from_csv_nonimage() unconditionally
# before this if/elif and then overwrote the result in every branch —
# a wasted full read, removed here.)
if CONSTRUCTION_METHOD == 'image':
    train_df, input_shape = data_from_csv(CSV_FILEPATH, DATA_FILEPATH)
    x_test_channelb = read_images(train_df.channelb.values, input_shape)
    x_test_channelc = read_images(train_df.channelc.values, input_shape)
    x_test_channeld = read_images(train_df.channeld.values, input_shape)
elif CONSTRUCTION_METHOD == 'matrix':
    train_df, input_shape = data_from_csv_nonimage(CSV_FILEPATH, DATA_FILEPATH)
    x_test_channelb = read_data(train_df.channelb.values, input_shape)
    x_test_channelc = read_data(train_df.channelc.values, input_shape)
    x_test_channeld = read_data(train_df.channeld.values, input_shape)
else:
    train_df, input_shape = data_from_csv_nonimage_6channel(
        CSV_FILEPATH, DATA_FILEPATH)
    x_test_channelbcd = read_data(train_df.channelbcd.values, input_shape)

# One-hot encode the integer class labels.
labels = train_df.object.values
labels = keras.utils.to_categorical(labels, NUMBER_OF_CLASSES)
Ejemplo n.º 33
0
import utils
from neuralnetwork import NeuralNetworkModel
import autograd.numpy as np
import pickle
import matplotlib.pyplot as plt
from utils import plt_image

# Paths to the gzipped MNIST IDX files and the model checkpoint directory.
TRAINING_IMAGES_PATH = 'mnistset/train-images-idx3-ubyte.gz'
TRAINING_LABELS_PATH = 'mnistset/train-labels-idx1-ubyte.gz'
TESTING_IMAGES_PATH = 'mnistset/t10k-images-idx3-ubyte.gz'
TESTING_LABELS_PATH = 'mnistset/t10k-labels-idx1-ubyte.gz'
MODELS_PATH = 'mnistmodels/'

# Load the full train and test sets (Python 2 print statements).
print 'Reading images from %s' % TRAINING_IMAGES_PATH
images = utils.read_images(TRAINING_IMAGES_PATH)
print 'Reading labels from %s' % TRAINING_LABELS_PATH
labels = utils.read_labels(TRAINING_LABELS_PATH)
print 'Reading images from %s' % TESTING_IMAGES_PATH
testing_images = utils.read_images(TESTING_IMAGES_PATH)
print 'Reading labels from %s' % TESTING_LABELS_PATH
testing_labels = utils.read_labels(TESTING_LABELS_PATH)
# Hold out the first and last 5000 training examples for validation;
# the remaining middle slice is used for training.
training_images = images[5000:-5000]
training_labels = labels[5000:-5000]
validating_images = np.concatenate([images[:5000], images[-5000:]])
validating_labels = np.concatenate([labels[:5000], labels[-5000:]])

np.set_printoptions(suppress=True)  # print floats without scientific notation
nnm = None  # model instance placeholder; created or loaded later

Ejemplo n.º 34
0
import utils
from neuralnetwork import NeuralNetworkModel
import autograd.numpy as np
import pickle
import matplotlib.pyplot as plt
from utils import plt_image

# Paths to the gzipped MNIST IDX files and the model checkpoint directory.
TRAINING_IMAGES_PATH = 'mnistset/train-images-idx3-ubyte.gz'
TRAINING_LABELS_PATH = 'mnistset/train-labels-idx1-ubyte.gz'
TESTING_IMAGES_PATH = 'mnistset/t10k-images-idx3-ubyte.gz'
TESTING_LABELS_PATH = 'mnistset/t10k-labels-idx1-ubyte.gz'
MODELS_PATH = 'mnistmodels/'

# Load the full train and test sets (Python 2 print statements).
print 'Reading images from %s' % TRAINING_IMAGES_PATH
images = utils.read_images(TRAINING_IMAGES_PATH)
print 'Reading labels from %s' % TRAINING_LABELS_PATH
labels = utils.read_labels(TRAINING_LABELS_PATH)
print 'Reading images from %s' % TESTING_IMAGES_PATH
testing_images = utils.read_images(TESTING_IMAGES_PATH)
print 'Reading labels from %s' % TESTING_LABELS_PATH
testing_labels = utils.read_labels(TESTING_LABELS_PATH)
# Hold out the first and last 5000 training examples for validation;
# the remaining middle slice is used for training.
training_images = images[5000:-5000]
training_labels = labels[5000:-5000]
validating_images = np.concatenate([images[:5000], images[-5000:]])
validating_labels = np.concatenate([labels[:5000], labels[-5000:]])

np.set_printoptions(suppress=True)  # print floats without scientific notation
nnm = None  # model instance placeholder; created or loaded later

Ejemplo n.º 35
0
            # NOTE(review): fragment of a training loop whose enclosing
            # function is outside this view.
            discriminator_costs.append(discriminator_cost)

            # Refresh the notebook output with the latest epoch losses and
            # a fresh grid of generated samples.
            display.clear_output(wait=True)
            print('Epoch: {}'.format(epoch+1))
            print('Generator loss: {}'.format(generator_loss))
            print('Discriminator loss: {}'.format(discriminator_loss))
            noise = tf.random.normal([9, noise_size])
            generate_print_save(generator, noise, epoch)

    # plot the learning curves of the generator and discriminator
    plt.plot(np.squeeze(generator_costs))
    plt.plot(np.squeeze(discriminator_costs))
    plt.show()

# Load the training images from disk (glob pattern for PNG files).
train_set = read_images(data_dir='data/*.png')

# let us look at few images from the dataset
print_images(images=train_set)

# setup number of channels, noise size, learning rate and beta_1
channels = 3          # RGB images
noise_size = 100      # latent vector dimensionality
learning_rate = 0.0001
beta_1 = 0.5          # Adam first-moment decay

# create an instance of generator and discriminator, their optimizers and checkpoints
generator, generator_optimizer, generator_checkpoint = create_generator(channels, noise_size, learning_rate, beta_1)
discriminator, discriminator_optimizer, discriminator_checkpoint = create_discriminator(channels, learning_rate, beta_1)

# train generator and discriminator