Ejemplo n.º 1
0
def main(flags):
    """Restore a trained TF graph from a checkpoint and write a binary
    prediction mask GeoTIFF for the image named in ``flags``.

    Args:
        flags: object exposing ``model`` (checkpoint path prefix) and
            ``image`` (input image path).
    """
    model = flags.model
    image = flags.image

    # Rebuild the graph from the checkpoint's meta file, then restore weights.
    saver = tf.train.import_meta_graph(model + ".meta")
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    session = tf.InteractiveSession(config=gpu_config)
    saver.restore(session, model)

    # Tensors registered in collections by the training script.
    X, mode = tf.get_collection("inputs")
    pred = tf.get_collection("outputs")[0]

    print('loading image')
    proj, geotrans, input_img = read_img(image)
    # Keep only the first three channels (RGB) as uint8.
    input_img = np.asarray(input_img[:, :, :3], dtype='uint8')

    # Predict over overlapping 256x256 windows and blend the overlaps.
    label_pred = predict_img_with_smooth_windowing(
        input_img,
        window_size=256,
        subdivisions=2,
        batch_size=256,
        pred_func=(
            lambda img: session.run(pred, feed_dict={X: img, mode: False})
        )
    )

    # Threshold channel 0 into a {0, 1} uint8 mask.
    label_pred = label_pred[:, :, 0]
    label_pred[label_pred >= 0.5] = 1
    label_pred[label_pred < 0.5] = 0
    label_pred = label_pred.astype(np.uint8)

    prd_name = "%s_%s_m%s_prd.tif" % (image[:-4], model.split('/')[0], model[-7:-5])
    write_img(prd_name, proj, geotrans, label_pred)
Ejemplo n.º 2
0
def to_png(input_dir="../../input/",
           output_dir="../../input/",
           input_img_name="input.jpg",
           output_img_name="input.png"):
    """Convert an image to PNG by re-saving it under the output name.

    Paths are built by plain string concatenation, so the directory
    arguments must end with a path separator (as the defaults do).

    Args:
        input_dir: directory containing the source image.
        output_dir: directory to write the converted image to.
        input_img_name: source file name (format inferred by Pillow).
        output_img_name: destination file name (.png).
    """
    # (Removed the no-op self-assignments of input_dir/output_dir.)
    inp_img_path = input_dir + input_img_name
    out_img_path = output_dir + output_img_name

    img = Image.open(inp_img_path)
    utils.write_img(out_img_path, img)
Ejemplo n.º 3
0
def to_bw(input_dir="../../input/",
          output_dir="../../input/",
          input_img_name="input2.png",
          out_img_name="input2_bw.png"):
    """Convert an image to 1-bit black-and-white, save it, and preview it.

    Paths are built by plain string concatenation, so the directory
    arguments must end with a path separator (as the defaults do).

    Args:
        input_dir: directory containing the source image.
        output_dir: directory to write the converted image to.
        input_img_name: source file name.
        out_img_name: destination file name.
    """
    # (Removed the no-op self-assignments and commented-out debug code.)
    inp_img_path = input_dir + input_img_name
    out_img_path = output_dir + out_img_name

    img = Image.open(inp_img_path)
    # Pillow mode '1': 1-bit pixels, thresholded/dithered by Pillow.
    img_bw = img.convert('1')
    utils.write_img(out_img_path, img_bw)
    utils.preview_img(img_bw)
Ejemplo n.º 4
0
def resize(input_dir="../../input/",
           output_dir="../../input/",
           input_img_name="input2_org.jpg",
           output_img_name="input2.png",
           size=(512, 512)):
    """Resize an image to ``size`` and save it under the output name.

    Paths are built by plain string concatenation, so the directory
    arguments must end with a path separator (as the defaults do).

    Args:
        input_dir: directory containing the source image.
        output_dir: directory to write the resized image to.
        input_img_name: source file name.
        output_img_name: destination file name.
        size: target (width, height). Previously hard-coded to (512, 512);
            now a parameter with the same default, so existing callers are
            unaffected.
    """
    # (Removed the no-op self-assignments of input_dir/output_dir.)
    inp_img_path = input_dir + input_img_name
    out_img_path = output_dir + output_img_name

    img = Image.open(inp_img_path)
    print(img.size)
    img_new = img.resize(size)
    utils.write_img(out_img_path, img_new)
    print(img_new.size)
Ejemplo n.º 5
0
def save_output(output, dset_name, chkpt_num, fwd_dir, output_tag, **params):
    """ Saves the volumes within a DataProvider ForwardScanner """

    for key in output.outputs.data:
        volume = output.outputs.get_data(key)

        # The tag is optional: include it in the filename only when non-empty.
        if len(output_tag) > 0:
            basename = "{}_{}_{}_{}.tif".format(dset_name, key, chkpt_num,
                                                output_tag)
        else:
            basename = "{}_{}_{}.tif".format(dset_name, key, chkpt_num)

        utils.write_img(volume, os.path.join(fwd_dir, basename))
Ejemplo n.º 6
0
def generate_baseline_img(small_imgs, input_img):
    """Greedily tile ``input_img`` with the best-matching small image per cell.

    For each (small_img_size) cell of the big image grid, the candidate in
    ``small_imgs`` with the lowest ``calc_error`` against that cell is pasted
    into a copy of the input. A progress snapshot is written to
    ``out_img_path`` after every cell (module-level global).

    Args:
        small_imgs: sequence of candidate tile arrays.
        input_img: target image array; not modified (a copy is tiled).

    Returns:
        The tiled image converted via ``utils.to_img``.
    """
    new_img = input_img.copy()

    # Grid geometry comes from module-level small_img_size / big_img_size.
    num_rows_small = small_img_size[0]
    num_cols_small = small_img_size[1]
    num_rows = big_img_size[0] // num_rows_small
    num_cols = big_img_size[1] // num_cols_small

    for r in tqdm(range(num_rows)):
        r_idx = r * num_rows_small
        r_idx_lim = r_idx + num_rows_small
        for c in tqdm(range(num_cols)):
            c_idx = c * num_cols_small
            c_idx_lim = c_idx + num_cols_small

            section_img = input_img[r_idx:r_idx_lim, c_idx:c_idx_lim]

            # float('inf') guarantees some tile is always selected; the old
            # 1e9 sentinel could leave mn_img as None (and crash the paste
            # below) if every candidate's error exceeded it.
            mn_err = float('inf')
            mn_img = None
            for img in tqdm(small_imgs):
                # (Removed a dead per-candidate utils.to_img conversion --
                # its only consumer was a commented-out preview call, and it
                # ran once per candidate per cell.)
                error = calc_error(section_img, img)
                if error < mn_err:
                    mn_err = error
                    mn_img = img

            new_img[r_idx:r_idx_lim, c_idx:c_idx_lim] = mn_img
            # Progress snapshot: persist the partial mosaic after each cell.
            gen_img = utils.to_img(new_img)
            utils.write_img(out_img_path, gen_img)

    gen_img = utils.to_img(new_img)
    return gen_img
Ejemplo n.º 7
0
def train_model(model, train_loader, epoch, optimizer, writer, opts):
    """Run one training epoch and log loss/accuracy/miou to TensorBoard.

    Args:
        model: segmentation network exposing ``n_classes``; invoked as
            ``model(image)``.
        train_loader: iterable yielding ``(image, mask)`` batches.
        epoch: current epoch index, used as the logging step.
        optimizer: optimizer stepped once per batch.
        writer: TensorBoard ``SummaryWriter``-like object.
        opts: unused here; keeps the signature parallel to evaluate_model.

    Returns:
        Tuple ``(epoch_loss, metric_collects)``.
    """
    n_classes = model.n_classes
    metric = nn.CrossEntropyLoss()

    # Whole-epoch accumulators, kept on CPU.
    # NOTE(review): assumes 512x512 inputs -- confirm against the loader.
    y_probs = torch.zeros(0, n_classes, 512, 512)
    y_trues = torch.zeros(0, 512, 512).long()
    epoch_loss = 0
    model.train()

    for i, (image, mask) in enumerate(train_loader):
        optimizer.zero_grad()

        if torch.cuda.is_available():
            image = image.cuda()
            mask = mask.cuda()

        # Call the module rather than model.forward() so registered hooks run.
        prediction = model(image)

        # For the torchvision models, an OrderedDict is returned
        if isinstance(prediction, OrderedDict):
            prediction = prediction['out']

        loss = metric(prediction, mask)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

        # Softmax on the compute device, then a single detach().cpu()
        # transfer (the original moved the tensor to CPU twice).
        y_prob = F.softmax(prediction, dim=1).detach().cpu()
        y_probs = torch.cat([y_probs, y_prob])
        y_trues = torch.cat([y_trues, mask.cpu()])

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    utils.write_img(y_probs, y_trues, epoch, writer, is_train=True)

    writer.add_scalar('Training loss', epoch_loss, epoch)
    writer.add_scalar('Training accuracy', metric_collects['accuracy'], epoch)
    writer.add_scalar('Training miou', metric_collects['miou'], epoch)

    return epoch_loss, metric_collects
Ejemplo n.º 8
0
def evaluate_model(model, val_loader, epoch, writer, opts):
    """Run one validation epoch and log loss/accuracy/miou to TensorBoard.

    Args:
        model: segmentation network exposing ``n_classes``; invoked as
            ``model(image)``.
        val_loader: iterable yielding ``(image, mask)`` batches.
        epoch: current epoch index, used as the logging step.
        writer: TensorBoard ``SummaryWriter``-like object.
        opts: unused here; keeps the signature parallel to train_model.

    Returns:
        Tuple ``(epoch_loss, metric_collects)``.
    """
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    model.eval()

    # Whole-epoch accumulators, kept on CPU.
    # NOTE(review): assumes 512x512 inputs -- confirm against the loader.
    y_probs = torch.zeros(0, n_classes, 512, 512)
    y_trues = torch.zeros(0, 512, 512).long()
    epoch_loss = 0

    # No parameter updates happen during validation, so disable autograd:
    # saves memory/compute without changing any computed value.
    with torch.no_grad():
        for i, (image, mask) in enumerate(val_loader):

            if torch.cuda.is_available():
                image = image.cuda()
                mask = mask.cuda()

            # Call the module rather than model.forward() so hooks run.
            prediction = model(image)

            # For the torchvision models, an OrderedDict is returned
            if isinstance(prediction, OrderedDict):
                prediction = prediction['out']

            loss = metric(prediction, mask)

            epoch_loss += loss.item()

            y_prob = F.softmax(prediction, dim=1)
            y_probs = torch.cat([y_probs, y_prob.detach().cpu()])
            y_trues = torch.cat([y_trues, mask.cpu()])

    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    utils.write_img(y_probs, y_trues, epoch, writer, is_train=False)

    writer.add_scalar('Validation loss', epoch_loss, epoch)
    writer.add_scalar('Validation accuracy', metric_collects['accuracy'],
                      epoch)
    writer.add_scalar('Validation miou', metric_collects['miou'], epoch)

    return epoch_loss, metric_collects
Ejemplo n.º 9
0
    def train(self, tmp_img_path=None):
        """Run the genetic-algorithm loop for ``self.num_iterations`` rounds.

        Each round applies fitness -> selection -> crossover -> mutation,
        then renders the current best individual and appends it to
        ``self.progress_imgs``. When ``tmp_img_path`` is given, the snapshot
        is also written to disk every round. The loop stops early when the
        termination criterion reported by ``self._termination()`` is met.

        Args:
            tmp_img_path: optional path for per-iteration snapshots.

        Returns:
            Tuple ``(progress_imgs, output_img, final_best_score)``.
        """
        self.output_img = None
        scores = []

        for iteration in tqdm(range(self.num_iterations)):
            # One GA generation.
            self._fitness()
            self._selection()
            self._crossover()
            self._mutation()

            # termination[0]: stop flag; termination[1]: index of the best
            # individual in the current population.
            termination = self._termination()
            best = self.current_population[termination[1]]

            snapshot = utils.to_img(best["img"].construct_img())
            self.output_img = snapshot
            self.progress_imgs.append(snapshot)

            print(best["score"])
            scores.append(best["score"])

            if tmp_img_path is not None:
                utils.write_img(tmp_img_path, self.output_img)

            if termination[0]:
                self.output_img = utils.to_img(best["img"].construct_img())
                self.progress_imgs.append(self.output_img)
                print("## Termination satisfied ")
                break

        print(scores)
        return self.progress_imgs, self.output_img, scores[-1]
Ejemplo n.º 10
0
# ------------------------------------------------------------------------
# Script driver: load the target image and the pool of small tiles, run the
# baseline mosaic generator, then preview and save the result.
# NOTE(review): inp_img_path, assets_dir, small_imgs_num and out_img_path
# are defined elsewhere in this module -- not visible in this chunk.
img = utils.read_img(inp_img_path)

# Load small_imgs_num candidate tiles named 00000.png, 00001.png, ...
imgs = utils.read_small_imgs(assets_dir=assets_dir,
                             num=small_imgs_num,
                             format_str="{:05d}.png")
# indexes = []
# for i in range(64):
#     indexes.append([np.random.randint(10000) for i in range(64)])

# imgt = utils.Image(imgs=imgs, index=indexes)

# img = imgt.construct_img()

# img = utils.to_img(img)

# print(img.size)
# utils.preview_img(img)

# ------------------------------------------------------------------------

img_np = utils.to_numpy(img)

# Time the (slow) exhaustive baseline generation.
time1 = time()
out_img = generate_baseline_img(imgs, img_np)
time2 = time()
print("Process time: {:}".format(time2 - time1))
utils.preview_img(out_img, title="Baseline_img")
utils.write_img(out_img_path, out_img)
        print(np.linalg.norm(emb - dist_mean))
        stats = np.percentile(dist, [10, 30, 50, 70, 90])
        return stats


if __name__ == '__main__':
    model = Model()
    dirs = os.listdir(IMAGE_DIR)
    # For every image in IMAGE_DIR: build an adversarial example via PGD
    # against its own embedding, report the difference, and save the result
    # to RESULT_DIR under the same file name.
    for index, image_name in enumerate(dirs):
        print("正处理第%d张图片..." % index)
        origin_img = read_img(os.path.join(IMAGE_DIR, image_name))  # load the original image
        # show_image(origin_img)

        # Compute the original image's embedding and set it as the victim
        # target for the PGD attack.
        origin_emb = model.eval_embeddings([origin_img])
        model.set_victim_embeddings(origin_emb)
        model.build_pgd_attack(EPSILON)

        # Generate the adversarial example.
        # np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement and produces the same dtype here.
        out = model.eval_attack(origin_img).astype(int)
        # show_image(out)

        # Compare the original image with the adversarial output.
        print("embedding之间差距为:%f" % distance_between_img(model, origin_img, out))
        evaluate_difference(out, origin_img)
        # show_difference(out,origin_img)

        # Save the adversarial example.
        write_img(os.path.join(RESULT_DIR, image_name), out)