Example #1
def analyse_activations(model, inputs, layer, image_idx):
    model.eval()
    for parameter in model.parameters():
        parameter.requires_grad_(False)

    outputs = []

    def track_outputs(_, __, output):
        print("track_outputs was called")
        outputs.append(output)

    hook = layer[0].register_forward_hook(track_outputs)

    with torch.no_grad():
        predictions = model(inputs)

    print([output.shape for output in outputs])

    image = outputs[0][image_idx]

    activation = image.norm(2, dim=0)

    util.show_activation(activation)
    plt.show()

    out = torchvision.utils.make_grid(inputs)
    util.imshow(out)
    plt.show()

    hook.remove()
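
A minimal usage sketch (not part of the original example), assuming a pretrained torchvision ResNet and a random batch standing in for real images; the target module is wrapped in a list because analyse_activations registers its hook on layer[0].

import torch
import torchvision

# Hypothetical setup: pretrained ResNet-18 and a random batch in place of real images.
model = torchvision.models.resnet18(pretrained=True)
inputs = torch.randn(4, 3, 224, 224)

# The hook is registered on layer[0], so pass the target module inside a list.
analyse_activations(model, inputs, [model.layer4], image_idx=0)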
Example #2
def segment_func(image, model_name, display=True):
    print("Loading model ...")
    with open('data/{}-jsrt140n-model.pkl'.format(model_name), 'rb') as model_file:
        model = pickle.load(model_file)
    model.join_masks = True

    print("Segment input ...")
    image = cv2.resize(image, SEGMENTATION_IMAGE_SHAPE, interpolation=cv2.INTER_CUBIC)
    mask = model.transform(np.array([image]))
    if display:
        boundaries = find_boundaries(mask)[0]
        tmp = np.full(boundaries.shape, dtype=bool, fill_value=False)  # np.bool was removed in NumPy 1.24
        b1 = np.array([boundaries, tmp, tmp])
        b2 = np.array([boundaries, boundaries, boundaries])
        b1 = np.swapaxes(b1, 0, 2)
        b1 = np.swapaxes(b1, 0, 1)
        b2 = np.swapaxes(b2, 0, 2)
        b2 = np.swapaxes(b2, 0, 1)

        util.imwrite_as_pdf('data/original', image)

        image = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
        max_value = np.max(image)
        image[b2] = 0.0
        image[b1] = max_value

        util.imwrite_as_pdf('data/segmented', image)
        print('mask shape', mask.shape, mask.dtype)
        util.imwrite_as_pdf('data/mask', mask[0])
        util.imshow('Segment with model {}'.format(model_name), image)
    return mask
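
A hedged call sketch; the image path and model name below are placeholders, and the pickle is expected at data/<model_name>-jsrt140n-model.pkl as segment_func assumes.

import cv2

# Hypothetical input: a grayscale chest radiograph loaded from disk.
image = cv2.imread('data/sample-cxr.png', cv2.IMREAD_GRAYSCALE)
mask = segment_func(image, model_name='shape-model', display=False)
print('mask shape:', mask.shape)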
Example #3
    def train_epoch(self, epoch):
        # self.scheduler.step(epoch)
        self.model.train()  # Set model to training mode
        # is_inception = False
        running_loss = 0.0
        running_corrects = 0
        padding = 5
        
        # Iterate over data.
        for inputs, labels, video_names in tqdm(self.dataloaders["train"]): #inputs, labels:  <class 'torch.Tensor'> torch.Size([64, 3, 224, 224]) <class 'torch.Tensor'> torch.Size([64])
            # print('inputs, labels: ',type(inputs),inputs.size(), type(labels), labels.size())
            # print(video_names)
            if self.plot_samples:
                print(video_names)
                plt.figure(figsize=(10,12))
                images = torchvision.utils.make_grid(inputs.cpu().data, padding=padding)
                imshow(images, video_names)
                dyImg = dynamicImage.computeDynamicImages(str(video_names[0]), self.numDynamicImages,16)
                dis = torchvision.utils.make_grid(dyImg.cpu().data, padding=padding)
                # print(video_names[0])
                plt.figure(figsize=(10,12))
                imshow(dis, 'RAW - '+str(video_names[0]))
            if self.numDynamicImages > 1:
                # print('==== dataloader size: ',inputs.size()) #[batch, ndi, ch, h, w]
                inputs = inputs.permute(1, 0, 2, 3, 4)
            inputs = inputs.to(self.device)
            labels = labels.to(self.device)
            # zero the parameter gradients
            self.optimizer.zero_grad()
            # track history if only in train
            with torch.set_grad_enabled(True):    
                outputs = self.model(inputs)
                # print('-- outputs size: ', outputs.size(), outputs)
                # print('-- labels size: ',labels.size(), labels)
                loss = self.criterion(outputs, labels)
                _, preds = torch.max(outputs, 1)
                # backward + optimize only if in training phase
                loss.backward()
                self.optimizer.step()
                # self.scheduler.step(epoch)
            # statistics
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)

        epoch_loss = running_loss / len(self.dataloaders["train"].dataset)
        epoch_acc = running_corrects.double() / len(self.dataloaders["train"].dataset)

        print("{} Loss: {:.4f} Acc: {:.4f}".format('train', epoch_loss, epoch_acc))
        if self.train_type == constants.OPERATION_TRAINING_FINAL:
            if epoch_acc > self.train_best_acc:
                self.train_best_acc = epoch_acc.item()
                checkpoint_name = self.checkpoint_path+'.pth'
                print('Saving FINAL model...',checkpoint_name)
                torch.save(self.model, checkpoint_name)
        # self.tb.save_value("trainLoss", "train_loss", epoch, epoch_loss)
        # self.tb.save_value("trainAcc", "train_acc", epoch, epoch_acc)
        return epoch_loss, epoch_acc
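
A sketch of the surrounding driver loop, assuming `trainer` is an instance of the class defining train_epoch, with its dataloaders, model, criterion, and optimizer already configured:

num_epochs = 25  # placeholder
for epoch in range(num_epochs):
    epoch_loss, epoch_acc = trainer.train_epoch(epoch)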
Example #4
def train(args, model, sess, saver):
    
    if args.fine_tuning :
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for fine-tuning!")
        print("model path is %s"%(args.pre_trained_model))
        
    num_imgs = len(os.listdir(args.train_Sharp_path))
    
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs',sess.graph)
#    if args.test_with_train:
#        f = open("valid_logs.txt", 'w')
    
    epoch = 1
    step = num_imgs // args.batch_size
    
    
    blur_imgs = util.image_loader(args.train_Blur_path, args.load_X, args.load_Y)
    sharp_imgs = util.image_loader(args.train_Sharp_path, args.load_X, args.load_Y)
    
    while epoch <= args.max_epoch:
        random_index = np.random.permutation(len(blur_imgs))
        for k in range(step):
            s_time = time.time()
            blur_batch, sharp_batch = util.batch_gen(blur_imgs, sharp_imgs, args.patch_size, args.batch_size, random_index, k, args.augmentation)
            Knoise = np.random.randn(args.batch_size,64)
            for t in range(args.critic_updates):
                _, D_loss = sess.run([model.D_train, model.D_loss], feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch,model.Knoise:Knoise, model.epoch : epoch})
            
            if (k+1) % args.log_freq == 0:
                _, G_loss, gene_K, gene_img, reg_loss, D_loss, gp_loss = sess.run(
                    [model.G_train, model.G_loss, model.gene_K, model.gene_img,
                     model.reg_loss, model.D_loss, model.gp_loss],
                    feed_dict={model.blur: blur_batch, model.sharp: sharp_batch,
                               model.Knoise: Knoise, model.epoch: epoch})
                gene_K=util.normalized(gene_K)
                gene_img=util.normalized(gene_img)
                util.imshow(gene_K[0,:,:,0],cmap='gray')
                toshow = np.hstack((sharp_batch[0]/255.0,blur_batch[0]/255.0,gene_img[0]))
                util.imshow(toshow)
                print("training with %d epoch %d/%d batch, D_loss: %0.2f, gp_loss: %0.2f, G_loss: %0.2f, reg_loss: %0.2f "%(epoch,k+1,step,D_loss,gp_loss,G_loss,reg_loss))
            else:
                _, G_loss = sess.run([model.G_train, model.G_loss], feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch,model.Knoise:Knoise, model.epoch : epoch})
            
            e_time = time.time()
        
#        if epoch % args.log_freq == 0:
        summary = sess.run(merged, feed_dict = {model.blur : blur_batch, model.sharp: sharp_batch,model.Knoise:Knoise, model.epoch : epoch})
        train_writer.add_summary(summary, epoch)
#            if args.test_with_train:
#                test(args, model, sess, saver, f, epoch, loading = False)
        print("%d training epoch completed" %(epoch))
        print("D_loss : %0.4f, \t G_loss : %0.4f"%(D_loss, G_loss))
        print("Elpased time : %0.4f"%(e_time - s_time))
        saver.save(sess, './model/DeblurrGAN', global_step = epoch, write_meta_graph = False)
        
        epoch += 1

    saver.save(sess, './model/DeblurrGAN_last', write_meta_graph = False)
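
A hedged TensorFlow 1.x driver; `args` (an argparse namespace with the fields used above) and `model` (the DeblurGAN graph) are assumed to be built elsewhere in the project.

import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=None)
    train(args, model, sess, saver)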
Example #5
File: split.py Project: apacha/mscr
 def show(self, lw=4, off=16):
     colors = ((0, 255, 0), (255, 0, 0), (0, 0, 0), ())
     tmp = cv2.cvtColor(self._img, cv2.COLOR_GRAY2RGB)
     for bl in self._blocks:
         pred, prob = bl.cls, bl.prob
         p1, p2 = bl.box[0], (bl.box[1][0] - lw, bl.box[1][1] - lw)
         cv2.putText(tmp, str(prob), (p1[1] + off, p1[0] + int(2.5 * off)),
                     cv2.FONT_HERSHEY_PLAIN, 2, colors[pred], 2)
         cv2.rectangle(tmp, p1[::-1], p2[::-1], colors[pred], lw)
     imshow(tmp)
Example #6
File: grid.py Project: fpeder/mscr
 def show(self, pred=[]):
     tmp = gray2rgb(self._img)
     if len(pred) > 0:
         for pt, p in zip(self._pts, pred):
             color = self.COLORS[p]
             cv2.circle(tmp, (pt[1], pt[0]), self.R, color, -1)
     else:
         for pt in self._pts:
             cv2.circle(tmp, (pt[1], pt[0]), self.R, self.COLORS[1], -1)
     imshow(tmp)
Example #7
File: split.py Project: fpeder/mscr
 def show(self, lw=4, off=16):
     colors = ((0, 255, 0), (255, 0, 0), (0, 0, 0), ())
     tmp = cv2.cvtColor(self._img, cv2.COLOR_GRAY2RGB)
     for bl in self._blocks:
         pred, prob = bl.cls, bl.prob
         p1, p2 = bl.box[0], (bl.box[1][0] - lw, bl.box[1][1] - lw)
         cv2.putText(tmp, str(prob), (p1[1] + off, p1[0] + int(2.5*off)),
                     cv2.FONT_HERSHEY_PLAIN, 2, colors[pred], 2)
         cv2.rectangle(tmp, p1[::-1], p2[::-1], colors[pred], lw)
     imshow(tmp)
Example #8
 def show(self, pred=[]):
     tmp = gray2rgb(self._img)
     if len(pred) > 0:
         for pt, p in zip(self._pts, pred):
             color = self.COLORS[p]
             cv2.circle(tmp, (pt[1], pt[0]), self.R, color, -1)
     else:
         for pt in self._pts:
             cv2.circle(tmp, (pt[1], pt[0]), self.R, self.COLORS[1], -1)
     imshow(tmp)
Example #9
def __main__():
    parser = argparse.ArgumentParser()
    parser.add_argument("--framesPath", type=str)
    parser.add_argument("--numDynamicImages", type=int)
    args = parser.parse_args()
    videoPath = args.framesPath
    videoPath = os.path.join(constants.PATH_UCFCRIME2LOCAL_FRAMES_REDUCED,
                             videoPath)
    numDynamicImages = args.numDynamicImages
    sequences = getSequences(videoPath, numDynamicImages)
    print(len(sequences))
    dynamicImages = []

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4770381, 0.4767955, 0.4773611],
                             [0.11147115, 0.11427314, 0.11617025])
    ])

    for seq in sequences:
        # if len(seq) == seqLen:
        frames = []
        for frame in seq:
            img_dir = os.path.join(videoPath, frame)
            # print(img_dir)
            img = Image.open(img_dir).convert("RGB")
            img = np.array(img)
            frames.append(img)
        imgPIL, img = getDynamicImage(frames)
        imgPIL = transform(imgPIL)
        dynamicImages.append(imgPIL)

    dynamicImagesTensor = torch.stack(dynamicImages, dim=0)

    images = torchvision.utils.make_grid(dynamicImagesTensor.cpu().data, padding=10)
    util.imshow(images, '')

    # imgPIL = self.spatial_transform(imgPIL.convert("RGB"))


# __main__()
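
A hypothetical command-line invocation (script and folder names are placeholders); --framesPath is joined onto constants.PATH_UCFCRIME2LOCAL_FRAMES_REDUCED before the frames are read:

#   python dynamic_images_demo.py --framesPath Arrest001 --numDynamicImages 4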
Example #10
def train_and_save(model, tr_images, tr_landmarks, te_images, model_name):
    print "Fit model ... tr {}, te {}".format(len(tr_images), len(te_images))
    model.fit(tr_images, tr_landmarks)

    print "\nRun model on test set ..."
    pred_masks = model.transform(te_images)

    for i in range(len(te_images)):
        image = te_images[i]
        boundaries = find_boundaries(pred_masks[i])
        max_value = np.max(image)
        image[boundaries] = max_value
        util.imshow('Segment with model {}'.format(model_name), image)

    print "Save model ...".format(len(te_images))
    model_file = open('data/{}.pkl'.format(model_name), 'wb')
    pickle.dump(model, model_file, -1)
    model_file.close()

    print "Save masks ..."
    np.save('data/{}-pred-masks'.format(model_name), np.array(pred_masks))
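
A hedged call sketch; the .npy paths and model name are placeholders, and `model` is whichever landmark/segmentation estimator the project constructs elsewhere.

import numpy as np

tr_images = np.load('data/train-images.npy')        # placeholder paths
tr_landmarks = np.load('data/train-landmarks.npy')
te_images = np.load('data/test-images.npy')
train_and_save(model, tr_images, tr_landmarks, te_images, model_name='lung-shape-model')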
Example #11
def visualize_model(model, num_images=6):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
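
Typical usage, assuming the module-level dataloaders, device, and class_names from the same transfer-learning script are defined and `model` is the fine-tuned network:

visualize_model(model, num_images=6)
plt.show()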
Example #12
def train(loss_func,
          extractor,
          initial_gradients,
          epochs=20,
          steps_per_epoch=100):
    # variable to optimize
    transfer = tf.Variable(initial_gradients)

    opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)

    @tf.function()
    def train_step(image):
        with tf.GradientTape() as tape:
            loss = loss_func(image)

        grad = tape.gradient(loss, image)
        opt.apply_gradients([(grad, image)])
        image.assign(clip_0_1(image))

    start = time.time()

    step = 0
    for n in range(epochs):
        for m in range(steps_per_epoch):
            step += 1
            train_step(transfer)
            print(".", end='')
        display.clear_output(wait=True)
        imshow(transfer.read_value())
        plt.title("Train step: {}".format(step))
        plt.show()

    end = time.time()
    print("Total time: {:.1f}".format(end - start))

    return transfer
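
A minimal smoke test for the loop (all names below are placeholders): a random image tensor and a trivial loss stand in for the real style/content loss, and extractor can be None because it is never used inside train; the module's clip_0_1 and imshow helpers are assumed to exist.

import tensorflow as tf

def dummy_loss(img):
    # Trivial stand-in for the real style/content loss.
    return tf.reduce_mean(tf.square(img - 0.5))

initial = tf.random.uniform((1, 128, 128, 3))
result = train(dummy_loss, extractor=None, initial_gradients=initial,
               epochs=1, steps_per_epoch=5)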
Example #13
    #net = torch.load("siameseNet.model")

    folder_dataset_test = dset.ImageFolder(
        root=config.get("dir", "testing_dir"))
    siamese_dataset = SiameseNetworkDataset(
        imageFolderDataset=folder_dataset_test,
        transform=transforms.Compose(
            [transforms.Resize((100, 100)),
             transforms.ToTensor()]),
        should_invert=False)

    test_dataloader = DataLoader(siamese_dataset,
                                 num_workers=0,
                                 batch_size=1,
                                 shuffle=False)
    dataiter = iter(test_dataloader)
    x0, _, _ = next(dataiter)

    for i in range(12):
        _, x1, _ = next(dataiter)
        concatenated = torch.cat((x0, x1), 0)

        output1, output2 = net(Variable(x0), Variable(x1))
        euclidean_distance = F.pairwise_distance(output1, output2)
        # similarity = 20/(euclidean_distance.item()+0.035)
        # if similarity > 100:
        #     similarity = 100.0
        # imshow(torchvision.utils.make_grid(concatenated), 'Similarity: {:.2f}%'.format(similarity))
        imshow(torchvision.utils.make_grid(concatenated),
               'Similarity: {:.2f}'.format(euclidean_distance.item()))
Example #14
 def show(self):
     imshow(self._tmp)
Example #15
    elif method == 'lce':
        return lce(**kwargs)
    elif method == 'norm':
        return normalize(**kwargs)
    elif method == 'norm3':
        norm = normalize(**kwargs)
        return [norm, norm, norm]
    elif method == 'heq':
        return equalize_hist(**kwargs)
    elif method == 'nlm':
        return denoise_nl_means(**kwargs)
    elif method == 'max_white':
        return max_white(**kwargs)
    elif method == 'stretch':
        return stretch(**kwargs)
    elif method == 'grey_world':
        return grey_world(**kwargs)
    elif method == 'retinex':
        return retinex(**kwargs)
    elif method == 'retinex_adjust':
        return retinex_adjust(**kwargs)

PREPROCESS_METHODS_WITH_MIN_MAX = ['stretch', 'max_white', 'grey_world', 'retinex', 'retinex_adjust']

if __name__ == '__main__':
    image = np.load('../dbs/lidc-idri-npy/LIDC0014.npy')
    rois = np.load('../dbs/lidc-idri-npy/LIDC0014-rois.npy')
    util.imshow('roi', image, display_shape=(256, 256))
    sbf = sliding_band_filter(image, num_rays=256, rad_range=(2,21), band=3)
    util.imshow('SBF image', sbf, display_shape=(256, 256))
Example #16
    def lbpio(self, img, lung_mask, blobs, masks, method='uniform', mode='inner_outer'):
        P = 9
        R = 1
        feature_vectors = []

        mag, dx, dy = finite_derivatives(img)

        for i in range(len(blobs)):
            x, y, r = blobs[i]
            shift = 0 
            side = 2 * shift + 2 * r + 1

            tl = (x - shift - r, y - shift - r)
            ntl = (max(0, tl[0]), max(0, tl[1]))
            br = (x + shift + r + 1, y + shift + r + 1)
            nbr = (min(img.shape[0], br[0]), min(img.shape[1], br[1]))

            img_roi = img[ntl[0]:nbr[0], ntl[1]:nbr[1]]
            dx_roi = dx[ntl[0]:nbr[0], ntl[1]:nbr[1]]
            dy_roi = dy[ntl[0]:nbr[0], ntl[1]:nbr[1]]
            mag_roi = mag[ntl[0]:nbr[0], ntl[1]:nbr[1]]

            side = img_roi.shape[0]
            rx = -1 * np.linspace(-1 * (side/2), side/2, side)
            ry = -1 * np.linspace(-1 * (side/2), side/2, side)
            ry, rx = np.meshgrid(rx, ry)
            phase = angle2((rx, ry), (dx_roi, dy_roi)) 

            util.imshow('roi', img_roi)
            util.imshow('phase', phase)
            util.imshow('mask', masks[i])

            lbp = feature.local_binary_pattern(phase, P, R, method=method)

            mask = masks[i].astype(np.float64)
            imask = 1 - mask

            bins = int(lbp.max() + 1)
            hi = []
            ho = []
            if mode == 'inner' or mode == 'inner_outer':
                hi, _ = np.histogram(lbp.ravel(), bins=bins, range=(0, bins), weights=mask.ravel(), density=True)
                hi /= (np.sum(hi) + util.EPS)
            if mode == 'outer' or mode == 'inner_outer':
                ho, _ = np.histogram(lbp.ravel(), bins=bins, range=(0, bins), weights=imask.ravel(), density=True)
                ho /= (np.sum(ho) + util.EPS)
            
            #print "hi shape {} sum {}".format(hi.shape, util.EPS)
            hist = []
            if mode == 'inner_outer':
                hist = np.hstack((hi, ho))
                hist /= (np.sum(hist) + util.EPS)
            elif mode == 'inner':
                hist = hi
            elif mode == 'outer':
                hist = ho
            elif mode == 'default':
                hist, _ = np.histogram(lbp.ravel(), bins=bins, range=(0, bins), density=True)
                hist /= (np.sum(hist) + util.EPS)

            feature_vectors.append(np.array(hist))

        return np.array(feature_vectors)
Example #17
 def show(self):
     imshow(self._tmp)
Example #18
outputs += grads
f_outputs = K.function([combination_image], outputs)

evaluator = Evaluator(height, width, f_outputs)

x = np.random.uniform(0, 255, (1, height, width, 3)) - 128.

prev_loss = None
loss_percentage = 100
i = 0

while (loss_percentage > 5 and i < 9):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=20)
    print('Current loss value:', min_val)
    end_time = time.time()
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
    if prev_loss is not None:
        loss_percentage = util.calculate_loss_drop_percentage(
            min_val, prev_loss)
        print('Loss drop percentage: ', loss_percentage, '%')
    prev_loss = min_val
    i += 1

x = util.deprocess_image(x, height, width)
util.imshow(x)
Example #19
}

data_dir = 'data/hymenoptera_data'
image_datasets = {
    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
    for x in ['train', 'val']
}
dataloaders = {
    x: torch.utils.data.DataLoader(image_datasets[x],
                                   batch_size=4,
                                   shuffle=True,
                                   num_workers=4)
    for x in ['train', 'val']
}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)

print("Saving sample images prepare-samples.png")
imshow(out, title=[class_names[x] for x in classes])
plt.savefig("prepare-samples.png")

for name, dataloader in dataloaders.items():
    print("Saving data %s.pth" % name)
    torch.save(dataloader, '%s.pth' % name)
Example #20
                new_level.append(upsamped)
            result.append(new_level)

        return result


if __name__ == "__main__":

    from scipy.misc import lena  # note: lena() has been removed from newer SciPy releases; substitute any grayscale test image
    import numpy as np
    import matplotlib.pylab as plt
    import time
    from scipy.ndimage import zoom

    im = lena().astype(np.float32)
    imshow(im, 'lena orig')

    tic = time.time()
    sp = SteerablePyramid()

    upsamp = sp.process_image(im, upsample=False)
    print "run time: %f" % (time.time() - tic)

    for i in range(0, 4):
        im = upsamp[i][0].copy()
        im.shape = (im.shape[0], im.shape[1])
        plt.imshow(im, cmap='gray')
        plt.show()