Exemple #1
0
    def __btn_genEuDM(self):
        """Generate random 2-D points, build their Euclidean distance
        matrix, and refresh the UI views with the result."""
        self.plotClear()

        # One sample of N random integer 2-D coordinates, min-max
        # normalised into [0, 1].
        raw_coords = dataSource.get_rand_data((1, self.N, 2), isInt=True,
                                              maxXY=maxXY, minXY=minXY)
        self.coords = utils.minmax_norm(raw_coords)[0]

        # Euclidean distances = sqrt of the squared-distance matrix,
        # normalised before display.
        dm = utils.get_distanceSq_matrix(self.coords) ** 0.5
        self.fillEntry(data=utils.minmax_norm(dm)[0],
                       entrylist=self.genDmViews)

        # Keep an algorithm-normalised numpy copy of the coordinates
        # for later plotting of the true labels.
        self.coords = self.etm.norm(self.coords[0].detach().numpy())

        # Reset the true-label toggle state (toggle from "off").
        self.showingTrueLabel = False
        self.toggleTrueLabel()

        self.resetResult()
Exemple #2
0
    def __btn_rsCoord_fun(self, method_name=None):
        """Reconstruct 2-D coordinates from the displayed distance matrix.

        Runs the selected MDS method (taken from the option menu unless
        ``method_name`` is given), plots the recovered coordinates, fills
        the reconstructed distance-matrix view, and reports the mean
        absolute reconstruction error plus the runtime.
        """
        # Distance matrix as currently shown in the entry grid.
        dm = self.getEntry(self.genDmViews).reshape(self.N, self.N)

        method_name = self.opt_method.get() if method_name is None else method_name

        if method_name == 'Deep MDS':

            # Only one Deep MDS dialog may be open at a time.
            if self.window_deepMDS is not None:
                return 

            self.plotRemove(method_name)
            self.__buildDeepMDSWindow()

            # Block until the Deep MDS dialog is closed by the user.
            self.master.wait_window(self.window_deepMDS)
            self.window_deepMDS = None

        # Run the chosen reconstruction method on the matrix and time it.
        data, time = utils.time_measure(self.methods[method_name], [dm])

        data = torch.tensor(data)
        data = utils.minmax_norm(data.view(1, -1))[0]

        self.fillEntry(data=data, entrylist=self.rsCoordViews)

        coords = data.reshape(self.N, self.d)
        self.plotData(x=coords[:, 0], y=coords[:, 1], label=method_name)

        dm = torch.tensor(dm)

        # Rebuild the distance matrix from the recovered coordinates and
        # rescale it to the input matrix's maximum for a fair comparison.
        rs_dm = utils.get_distanceSq_matrix(coords) ** 0.5
        rs_dm = utils.minmax_norm(rs_dm)[0] * torch.max(dm)

        # Mean absolute error between input and reconstructed matrices.
        loss = torch.mean(torch.abs(dm - rs_dm.view_as(dm)))

        self.label_loss.config(text='Loss = %s (%ss)' % (
            str(round(float(loss), 10)), str(round(time, 4))
        ))

        self.fillEntry(data=rs_dm, entrylist=self.rsDmViews)

        # NOTE(review): pre-flipping self.colorized before toggling appears
        # intended to force a specific diff-colour state — confirm against
        # toggleColorizeDiff's implementation.
        self.colorized = not self.colorized
        self.toggleColorizeDiff(self.rsDmViews)
Exemple #3
0
    def __init__(self, N, d, test_size):
        """Set up an evaluation helper for N points in d dimensions.

        Builds the algorithm under test, an L1 coords-to-distance-matrix
        loss, and a normalised random distance-matrix test set of
        ``test_size`` samples.
        """
        self.N = N
        self.d = d
        # Number of unique pairwise distances among N points.
        self.n_d = int(N * (N - 1) / 2)
        self.test_size = test_size

        self.etm = Algorithm(N, d)
        self.lossFun = CoordsToDMLoss(
            N=N, lossFun=nn.L1Loss(reduction='mean'))

        # Random integer distance matrices, min-max scaled with the
        # minimum pinned at 0.
        raw = dataSource.generate_rand_DM(
            self.N, self.test_size, isInt=True, sample_space=(1000, 1))
        self.test_data = utils.minmax_norm(raw, dmin=0)[0]
Exemple #4
0
    def __call__(self, x):
        """Preprocess distance matrix ``x`` according to the configured flags.

        Returns ``(prepared, original)`` where ``prepared`` is a detached
        clone of the transformed input with gradients enabled.
        """
        out = x

        if self.add_noise:
            out = add_noise_to_dm(out)
        if self.scale:
            # Rescale into [0, 1] with the minimum pinned at 0.
            out = utils.minmax_norm(out, dmin=0)[0]
        if self.flatten:
            # Collapse each sample's matrix into a single row vector.
            out = out.view(out.size()[0], 1, -1)

        # Fresh leaf tensor so optimisation can backprop into it.
        out = out.clone().detach().requires_grad_(True)
        return out, x
Exemple #5
0
    def __btn_genDM_fun(self):
        """Generate a random distance matrix (not derived from points)
        and refresh the UI views; clears any cached coordinates."""
        self.plotClear()
        self.coords = None

        # One random integer distance matrix, normalised for display.
        dm = dataSource.generate_rand_DM(
            N=self.N, sample_size=1, isInt=True, sample_space=(maxXY, minXY))
        self.fillEntry(data=utils.minmax_norm(dm)[0],
                       entrylist=self.genDmViews)

        # Reset the true-label toggle state (toggle from "on").
        self.showingTrueLabel = True
        self.toggleTrueLabel()

        self.resetResult()
    def validation_step(self, batch, batch_idx):
        """Compute the dice metric for one validation batch.

        For the first batch of the epoch, also logs an image grid of the
        input modalities, ground-truth labels and predicted segmentations.
        """
        image = batch['image']
        label = batch['label']

        # Inference only — no gradients needed during validation.
        with torch.no_grad():
            logit, _ = self.forward(image)

        dice = self.dice_metric(logit, label)

        if batch_idx == 0:
            image = image.detach().cpu()
            image = minmax_norm(image)

            label = label.detach().cpu()
            # Per-pixel class prediction from the logits.
            output = logit.argmax(dim=1).detach().cpu()

            n_images = min(self.config.save.n_save_images, image.size(0))

            # Always save t1ce; add flair when the dataset provides it.
            save_modalities = ['t1ce']
            if 'flair' in self.config.dataset.modalities:
                save_modalities.append('flair')

            save_series = []
            for modality in save_modalities:
                # Select this modality's channel, keeping a channel axis.
                idx = self.config.dataset.modalities.index(modality)
                save_image = image[:n_images, ...][:, idx, ...][:, np.newaxis,
                                                                ...]
                save_series.append(save_image)

            label = label[:n_images, ...].float()[:, np.newaxis, ...]
            output = output[:n_images, ...].float()[:, np.newaxis, ...]

            # Scale class indices into [0, 1] for image logging.
            max_label_val = self.config.metric.n_classes - 1
            label /= max_label_val
            output /= max_label_val

            save_series.append(label)
            save_series.append(output)

            label_grid = torch.cat(save_series)
            self.logger.log_images('segmentation',
                                   label_grid,
                                   self.current_epoch,
                                   self.global_step,
                                   nrow=n_images)

        return dice
Exemple #7
0
def main():
    """Train an RNN regressor on raw binary data from ``--input-dir``.

    Command-line options:
        -d/--input-dir   directory containing X.dat and Y.dat (float64 binary)
        -o/--output-dir  path where the checkpoint is saved
        -t/--timesteps   timesteps per sample
        -n/--num-input   input vector width

    Trains with SGD on an MSE loss, printing train/validation loss once
    per epoch, then reports the test loss and saves a checkpoint.
    """
    if (len(sys.argv) <= 1):
        print("train.py -h or --help to get guideline of input options")
        exit()
    use = "Usage: %prog [options] filename"
    parser = OptionParser(usage=use)
    parser.add_option("-d",
                      "--input-dir",
                      dest="input_dir",
                      action="store",
                      type="string",
                      help="input data dir")
    parser.add_option("-o",
                      "--output-dir",
                      dest="ckpt_dir",
                      action="store",
                      type="string",
                      help="ckpt data dir")
    parser.add_option("-t",
                      "--timesteps",
                      dest="timesteps",
                      action="store",
                      type="int",
                      help="timesteps")
    parser.add_option("-n",
                      "--num-input",
                      dest="num_input",
                      action="store",
                      type="int",
                      help="number of input (input vector's width)")

    (options, args) = parser.parse_args()
    input_dir = options.input_dir
    timesteps = options.timesteps
    num_input = options.num_input
    ckpt_dir = options.ckpt_dir

    # X.dat holds float64 values; one sample = timesteps * num_input values.
    X = np.fromfile(input_dir + '/X.dat', dtype=float)
    cardinality = int(X.shape[0] / (timesteps * num_input))
    X = X.reshape([cardinality, timesteps * num_input])
    Y = np.fromfile(input_dir + '/Y.dat', dtype=float)
    train_x, val_x, test_x, train_y, val_y, test_y = utl.train_val_test_split(
        X, Y, split_frac=0.80)

    # Training parameters.
    learning_rate = 0.0015
    epochs = 200
    batch_size = 40

    # Network parameters.
    num_hidden = 2048
    num_classes = 1

    print("### Network Parameters ###")
    print("Learning Rate: {}".format(learning_rate))
    print("Batch Size: {}".format(batch_size))
    print("Size of Hidden Layer: {}".format(num_hidden))
    print("Timestep: {}".format(timesteps))
    print("------------------")

    # Graph inputs.
    X_ = tf.placeholder("float", [None, timesteps, num_input])
    Y_ = tf.placeholder("float", [None, num_classes])
    lr = tf.placeholder("float")

    # Output projection applied to the RNN's last hidden state.
    weights = {'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))}
    biases = {'out': tf.Variable(tf.random_normal([num_classes]))}
    prediction = RNN(X_, weights, biases, timesteps, num_hidden)

    loss_op = tf.losses.mean_squared_error(Y_, prediction)
    optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss_op)

    # NOTE(review): both sides of this comparison start from
    # `prediction / 1.8`, so this "accuracy" looks wrong; kept as-is
    # because only the losses are actually reported.
    correct_pred = tf.equal(
        tf.cast((prediction / 1.8) - tf.round(prediction / 1.8), tf.float32),
        tf.cast((prediction / 1.8) - tf.round(Y_ / 1.8), tf.float32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    init = tf.global_variables_initializer()

    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)
        saver = tf.train.Saver()

        n_batches = len(train_x) // batch_size

        for e in range(epochs):
            # BUG FIX: the original tested `epochs % 30 == 0`, which is a
            # constant (200 % 30 == 20), so the learning rate never
            # decayed. Decay by 5% every 30 epochs as evidently intended.
            if e > 0 and e % 30 == 0:
                learning_rate = learning_rate * 0.95
            train_acc = []
            for ii, (x, y) in enumerate(
                    utl.get_batches(train_x, train_y, batch_size), 1):
                x = x.reshape((batch_size, timesteps, num_input))
                x_norm = utl.minmax_norm(x)

                feed = {X_: x_norm, Y_: y[:, None], lr: learning_rate}
                loss, acc, _ = sess.run([loss_op, accuracy, optimizer],
                                        feed_dict=feed)
                train_acc.append(acc)

                # Evaluate on the validation split once per epoch
                # (when the last training batch of the epoch finishes).
                if (ii + 1) % n_batches == 0:
                    val_acc = []
                    for xx, yy in utl.get_batches(val_x, val_y, batch_size):
                        xx = xx.reshape((batch_size, timesteps, num_input))
                        xx_norm = utl.minmax_norm(xx)
                        feed = {
                            X_: xx_norm,
                            Y_: yy[:, None],
                            lr: learning_rate
                        }
                        val_batch_loss = sess.run([loss_op], feed_dict=feed)
                        val_acc.append(val_batch_loss)

                    print(
                        "Epoch: {}/{}...".format(e + 1, epochs),
                        "Batch: {}/{}...".format(ii + 1, n_batches),
                        "Train Loss: {:.3f}...".format(loss),
                        "Val Loss: {:.3f}".format(np.mean(val_acc)))

        # Final evaluation on the held-out test split.
        test_data = test_x.reshape((-1, timesteps, num_input))
        test_norm = utl.minmax_norm(test_data)
        test_label = test_y
        print(
            "Testing Loss:",
            sess.run(loss_op,
                     feed_dict={
                         X_: test_norm,
                         Y_: test_label[:, None],
                         lr: learning_rate
                     }))

        # Persist the trained model.
        saver.save(sess, ckpt_dir)
Exemple #8
0
def test(net, config, logger, test_loader, test_info, step, model_file=None):
    """Evaluate a weakly-supervised temporal action localisation model.

    Computes video-level classification accuracy, builds action proposals
    from the suppressed class activation sequences over a range of action
    thresholds, applies class-wise NMS, writes the results to a temporary
    JSON file, and logs mAP over tIoU thresholds 0.1-0.9.
    """
    # NOTE(review): the inner loops below reuse loop variable `i`,
    # shadowing the video-loop index. Harmless here because the outer
    # `for` reassigns it each iteration, but fragile to edits.
    with torch.no_grad():
        net.eval()

        if model_file is not None:
            net.load_state_dict(torch.load(model_file))

        final_res = {}
        final_res['version'] = 'VERSION 1.3'
        final_res['results'] = {}
        final_res['external_data'] = {
            'used': True,
            'details': 'Features from I3D Network'
        }

        num_correct = 0.
        num_total = 0.

        load_iter = iter(test_loader)

        for i in range(len(test_loader.dataset)):
            # _data: video features; _label: video labels;
            # vid_name: video name; vid_num_seg: true feature-sequence length.
            _data, _label, _, vid_name, vid_num_seg = next(load_iter)

            _data = _data.cuda()
            _label = _label.cuda()
            """
            cas_base: (1,T,21)
            score_supp: (1,21) 
            cas_supp: (1,T,21)
            fore_weights: (1,T,1)
            """
            _, cas_base, score_supp, cas_supp, fore_weights = net(_data)

            label_np = _label.cpu().numpy()
            score_np = score_supp[
                0, :-1].cpu().data.numpy()  # action-class scores, background class dropped (1,20)

            # Binarise the class scores at the classification threshold.
            score_np[np.where(
                score_np < config.class_thresh)] = 0  # cls_thresh = 0.25
            score_np[np.where(score_np >= config.class_thresh)] = 1

            correct_pred = np.sum(
                label_np == score_np,
                axis=1)  # per-video count of classes matching the label

            # A video counts as correct only when all classes match.
            num_correct += np.sum((correct_pred == config.num_classes).astype(
                np.float32))
            num_total += correct_pred.shape[0]  # number of videos

            # Normalise the activation sequences for numerical stability.
            cas_base = utils.minmax_norm(cas_base)  # (B,T,C+1)
            cas_supp = utils.minmax_norm(cas_supp)  # (B,T,C+1)

            pred = np.where(
                score_np > config.class_thresh)[0]  # indices of predicted action classes

            if pred.any():
                cas_pred = cas_supp[0].cpu().numpy()[:, pred]  # (T, C+1)-->T
                cas_pred = np.reshape(cas_pred,
                                      (config.num_segments, -1, 1))  # (T,1,1)
                # Upsample the activation sequence along time.
                cas_pred = utils.upgrade_resolution(cas_pred,
                                                    config.scale)  # scale:24

                proposal_dict = {}

                for i in range(len(config.act_thresh)
                               ):  #act_thresh = np.arange(0.0, 0.25, 0.025)
                    cas_temp = cas_pred.copy()
                    # Zero out activations below this action threshold.
                    zero_location = np.where(
                        cas_temp[:, :, 0] < config.act_thresh[i])
                    cas_temp[zero_location] = 0

                    # cas_temp: (T*scale, len(pred), 1); len(pred) = #selected classes
                    seg_list = [
                    ]  # one entry per predicted class: indices of active positions
                    for c in range(len(pred)):
                        pos = np.where(
                            cas_temp[:, c, 0] > 0)
                        seg_list.append(pos)
                    # proposals: (class, score, start, end) tuples
                    proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
                                    vid_num_seg[0].cpu().item(), config.feature_fps, config.num_segments)

                    for i in range(len(proposals)):
                        class_id = proposals[i][0][0]

                        if class_id not in proposal_dict.keys():
                            proposal_dict[class_id] = []

                        proposal_dict[class_id] += proposals[i]

                # Class-wise non-maximum suppression over all thresholds.
                final_proposals = []
                for class_id in proposal_dict.keys():
                    final_proposals.append(
                        utils.nms(proposal_dict[class_id], 0.7))

                final_res['results'][vid_name[0]] = utils.result2json(
                    final_proposals)

        test_acc = num_correct / num_total

        json_path = os.path.join(config.output_path, 'temp_result.json')
        with open(json_path, 'w') as f:
            json.dump(final_res, f)
            f.close()

        tIoU_thresh = np.linspace(0.1, 0.9, 9)
        anet_detection = ANETdetection(config.gt_path,
                                       json_path,
                                       subset='test',
                                       tiou_thresholds=tIoU_thresh,
                                       verbose=False,
                                       check_status=False)
        mAP, average_mAP = anet_detection.evaluate()

        logger.log_value('Test accuracy', test_acc, step)

        for i in range(tIoU_thresh.shape[0]):
            logger.log_value('mAP@{:.1f}'.format(tIoU_thresh[i]), mAP[i], step)

        logger.log_value('Average mAP', average_mAP, step)

        test_info["step"].append(step)
        test_info["test_acc"].append(test_acc)
        test_info["average_mAP"].append(average_mAP)

        for i in range(tIoU_thresh.shape[0]):
            test_info["mAP@{:.1f}".format(tIoU_thresh[i])].append(mAP[i])
Exemple #9
0
def test(net, config, logger, test_loader, test_info, step, model_file=None):
    """Evaluate an action-localisation model with magnitude-guided CAS.

    Computes video-level classification accuracy, fuses the class
    activation sequence with feature magnitudes, builds proposals under
    two families of thresholds (CAS and magnitude), applies class-wise
    NMS, writes results to JSON, and logs mAP over tIoU 0.1-0.7.
    """
    # NOTE(review): inner loops reuse loop variable `i`, shadowing the
    # video-loop index; harmless since the outer `for` reassigns it.
    with torch.no_grad():
        net.eval()

        if model_file is not None:
            net.load_state_dict(torch.load(model_file))

        final_res = {}
        final_res['version'] = 'VERSION 1.3'
        final_res['results'] = {}
        final_res['external_data'] = {
            'used': True,
            'details': 'Features from I3D Network'
        }

        num_correct = 0.
        num_total = 0.

        load_iter = iter(test_loader)

        for i in range(len(test_loader.dataset)):

            _data, _label, _, vid_name, vid_num_seg = next(load_iter)

            _data = _data.cuda()
            _label = _label.cuda()

            vid_num_seg = vid_num_seg[0].cpu().item()
            num_segments = _data.shape[1]

            score_act, _, feat_act, feat_bkg, features, cas_softmax = net(
                _data)

            # Mean L2 magnitude of action / background features, used as
            # normalisation bounds below.
            feat_magnitudes_act = torch.mean(torch.norm(feat_act, dim=2),
                                             dim=1)
            feat_magnitudes_bkg = torch.mean(torch.norm(feat_bkg, dim=2),
                                             dim=1)

            label_np = _label.cpu().data.numpy()
            score_np = score_act[0].cpu().data.numpy()

            # Binarise video-level class scores at the classification threshold.
            pred_np = np.zeros_like(score_np)
            pred_np[np.where(score_np < config.class_thresh)] = 0
            pred_np[np.where(score_np >= config.class_thresh)] = 1

            correct_pred = np.sum(label_np == pred_np, axis=1)

            # A video is correct only when every class is predicted right.
            num_correct += np.sum(
                (correct_pred == config.num_classes).astype(np.float32))
            num_total += correct_pred.shape[0]

            feat_magnitudes = torch.norm(features, p=2, dim=2)

            # Normalise magnitudes between the background (min) and the
            # action (max) reference values.
            feat_magnitudes = utils.minmax_norm(feat_magnitudes,
                                                max_val=feat_magnitudes_act,
                                                min_val=feat_magnitudes_bkg)
            feat_magnitudes = feat_magnitudes.repeat(
                (config.num_classes, 1, 1)).permute(1, 2, 0)

            # Modulate the class activation sequence by feature magnitude.
            cas = utils.minmax_norm(cas_softmax * feat_magnitudes)

            pred = np.where(score_np >= config.class_thresh)[0]

            # Fall back to the top-scoring class when nothing passes.
            if len(pred) == 0:
                pred = np.array([np.argmax(score_np)])

            cas_pred = cas[0].cpu().numpy()[:, pred]
            cas_pred = np.reshape(cas_pred, (num_segments, -1, 1))

            cas_pred = utils.upgrade_resolution(cas_pred, config.scale)

            proposal_dict = {}

            feat_magnitudes_np = feat_magnitudes[0].cpu().data.numpy()[:, pred]
            feat_magnitudes_np = np.reshape(feat_magnitudes_np,
                                            (num_segments, -1, 1))
            feat_magnitudes_np = utils.upgrade_resolution(
                feat_magnitudes_np, config.scale)

            # Proposals from CAS thresholds.
            for i in range(len(config.act_thresh_cas)):
                cas_temp = cas_pred.copy()

                zero_location = np.where(
                    cas_temp[:, :, 0] < config.act_thresh_cas[i])
                cas_temp[zero_location] = 0

                seg_list = []
                for c in range(len(pred)):
                    pos = np.where(cas_temp[:, c, 0] > 0)
                    seg_list.append(pos)

                proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
                                vid_num_seg, config.feature_fps, num_segments)

                for i in range(len(proposals)):
                    class_id = proposals[i][0][0]

                    if class_id not in proposal_dict.keys():
                        proposal_dict[class_id] = []

                    proposal_dict[class_id] += proposals[i]

            # Proposals from feature-magnitude thresholds.
            for i in range(len(config.act_thresh_magnitudes)):
                cas_temp = cas_pred.copy()

                feat_magnitudes_np_temp = feat_magnitudes_np.copy()

                zero_location = np.where(feat_magnitudes_np_temp[:, :, 0] <
                                         config.act_thresh_magnitudes[i])
                feat_magnitudes_np_temp[zero_location] = 0

                seg_list = []
                for c in range(len(pred)):
                    pos = np.where(feat_magnitudes_np_temp[:, c, 0] > 0)
                    seg_list.append(pos)

                proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
                                vid_num_seg, config.feature_fps, num_segments)

                for i in range(len(proposals)):
                    class_id = proposals[i][0][0]

                    if class_id not in proposal_dict.keys():
                        proposal_dict[class_id] = []

                    proposal_dict[class_id] += proposals[i]

            # Class-wise non-maximum suppression over all thresholds.
            final_proposals = []
            for class_id in proposal_dict.keys():
                final_proposals.append(utils.nms(proposal_dict[class_id], 0.6))

            final_res['results'][vid_name[0]] = utils.result2json(
                final_proposals)

        test_acc = num_correct / num_total

        json_path = os.path.join(config.output_path, 'result.json')
        with open(json_path, 'w') as f:
            json.dump(final_res, f)
            f.close()

        tIoU_thresh = np.linspace(0.1, 0.7, 7)
        anet_detection = ANETdetection(config.gt_path,
                                       json_path,
                                       subset='test',
                                       tiou_thresholds=tIoU_thresh,
                                       verbose=False,
                                       check_status=False)
        mAP, average_mAP = anet_detection.evaluate()

        logger.log_value('Test accuracy', test_acc, step)

        for i in range(tIoU_thresh.shape[0]):
            logger.log_value('mAP@{:.1f}'.format(tIoU_thresh[i]), mAP[i], step)

        logger.log_value('Average mAP', average_mAP, step)

        test_info["step"].append(step)
        test_info["test_acc"].append(test_acc)
        test_info["average_mAP"].append(average_mAP)

        for i in range(tIoU_thresh.shape[0]):
            test_info["mAP@{:.1f}".format(tIoU_thresh[i])].append(mAP[i])
Exemple #10
0
def main():
    """Run inference with a previously trained RNN regressor.

    Restores the checkpoint named by -c/--ckpt-dir, reports test-split
    loss, then writes per-sample predictions and binary diagnoses
    (threshold 1.8) to result files inside the checkpoint directory.
    """
    if (len(sys.argv) <= 1):
        print("infer.py -h or --help to get guideline of input options")
        exit()
    use = "Usage: %prog [options] filename"
    parser = OptionParser(usage = use)
    parser.add_option("-d", "--input-dir", dest="input_dir", action="store", type="string", help="input data dir")
    parser.add_option("-t", "--timesteps", dest="timesteps", action="store", type="int", help="timesteps")
    parser.add_option("-n", "--num-input", dest="num_input", action="store", type="int", help="number of input (input vector's width)")
    parser.add_option("-c", "--ckpt-dir", dest="ckpt_dir", action="store", type="string", help="directory of checkpoint")

    (options, args) = parser.parse_args()
    input_dir = options.input_dir
    timesteps = options.timesteps
    num_input = options.num_input
    #ckpt_dir = options.ckpt_dir

    # X.dat holds float64 values; one sample = timesteps * num_input values.
    X = np.fromfile(input_dir + '/X.dat', dtype=float)
    cardinality = int(X.shape[0]/(timesteps * num_input))
    X = X.reshape([cardinality, timesteps, num_input])
    Y = np.fromfile(input_dir + '/Y.dat', dtype=float)
    

    train_x, val_x, test_x, train_y, val_y, test_y = utl.train_val_test_split(X, Y, split_frac=0.80)
     
    # Training Parameters
    learning_rate = 0.001
    epochs =800 
    batch_size = 40
    #display_step = 200
    
    # Network Parameters
    #num_input = 2 
    #timesteps = 480 
    num_hidden = 2048 
    num_classes = 1
   
    print("### Network Parameters ###")
    print("Learning Rate: {}".format(learning_rate))
    print("Batch Size: {}".format(batch_size))
    print("Size of Hidden Layer: {}".format(num_hidden))
    print("Timestep: {}".format(timesteps)) 
    print("------------------")
    # Graph inputs; must mirror the architecture used at training time.
    X_ = tf.placeholder("float", [None, timesteps, num_input])
    Y_ = tf.placeholder("float", [None, num_classes])
    lr = tf.placeholder("float")
    
    weights = {
        'out':tf.Variable(tf.random_normal([num_hidden,num_classes])),
    }
    biases = {
        'out':tf.Variable(tf.random_normal([num_classes]))
    }
    prediction = RNN(X_, weights, biases, timesteps, num_hidden)
    
    loss_op = tf.losses.mean_squared_error(Y_, prediction)
    #optimizer = tf.train.AdadeltaOptimizer(lr).minimize(loss_op)
    #optimizer = tf.train.AdamOptimizer(lr).minimize(loss_op)
    optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss_op)
     
    correct_pred = tf.equal(tf.cast( (prediction/1.8) - tf.round(prediction/1.8), tf.float32), tf.cast( (prediction/1.8)-tf.round(Y_/1.8), tf.float32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    
    # Restore the ckpt
    SAVER_DIR = options.ckpt_dir 
    saver = tf.train.Saver()
    # NOTE(review): joining SAVER_DIR with itself looks suspicious, and
    # checkpoint_path is never used afterwards — confirm intent.
    checkpoint_path = os.path.join(SAVER_DIR, SAVER_DIR)
    ckpt = tf.train.get_checkpoint_state(SAVER_DIR)
    
    
    # NOTE(review): `init` is built but never run — variables come from
    # the restored checkpoint instead.
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        #new_saver = tf.train.import_meta_graph('ckpt.meta')
        saver.restore(sess, ckpt.model_checkpoint_path) 
        # Loss on the held-out test split.
        test_norm = utl.minmax_norm(test_x)
        print("loss test: %f" % loss_op.eval(feed_dict = {X_:test_norm, Y_:test_y[:, None]}))

        # Predictions over the full dataset.
        X_norm = utl.minmax_norm(X)
        pred = np.array(prediction.eval(feed_dict = {X_:X_norm, Y_:Y[:, None]}))
        
        # Binary diagnosis: positive when the regressed value >= 1.8.
        pred_diagnosis = [1 if x[0]>=1.8 else 0 for x in list(pred)]
        y_diagnosis = [1 if x>=1.8 else 0 for x in list(Y)]
        evaluation = np.equal(pred_diagnosis, y_diagnosis)
        print(np.mean(evaluation))
        # Dump raw predictions and binary diagnoses next to the checkpoint.
        f = open(SAVER_DIR + '/result.txt', 'w')
        for i in range(0, len(Y)):
            f.write(str(pred[i][0]) + ', ' + str(Y[i])+'\n')
        f2 = open(SAVER_DIR + '/result_diagnosis.txt', 'w')
        for i in range(0, len(Y)):
            f2.write(str(pred_diagnosis[i]) + ', ' + str(y_diagnosis[i])+'\n')
        f2.close()
        f.close()
        # NOTE(review): this fragment appears to belong to a different,
        # per-patient segmentation-evaluation routine (it references
        # dataset/encoder/vq/decoder/label_volume defined elsewhere);
        # its enclosing scope is outside this view.
        output_volume = None

        for file in dataset.get_patient_samples(patient_id):
            sample = dataset.load_file(file)
            patient_id = sample['patient_id']
            n_slice = sample['n_slice']
            image = sample['image'].unsqueeze(0)
            label = sample['label'].unsqueeze(0)

            # Inference only: encode, vector-quantise, decode.
            with torch.no_grad():
                lat = encoder(image)
                qlat, l_lat, ids = vq(lat)
                logit = decoder(qlat)

            image = image.detach().cpu()[0, 1, ...]
            image = minmax_norm(image)
            label = label.detach().cpu()[0, ...]
            # Per-pixel class prediction from the logits.
            output = logit.argmax(dim=1).detach().cpu()[0, ...]

            # Accumulate slices into full volumes.
            label_volume = concat(label_volume, label)
            output_volume = concat(output_volume, output)

        # Per-class dice over the whole patient volume.
        dice_result = calc_dice(label_volume, output_volume,
                                index_to_class_name)
        dice_result.update({
            'patient_id': patient_id,
        })

        result_summary['patient_id'].append(dice_result['patient_id'])
        result_summary['Background'].append(dice_result['Background'])
        result_summary['NET'].append(dice_result['NET'])
Exemple #12
0
from torch import nn, Tensor
from datetime import datetime

# Use float64 tensors everywhere for numerical stability.
torch.set_default_tensor_type('torch.DoubleTensor')

# Experiment configuration: batch size, test-set size, and
# (sample count, point count, embedding dimension).
batch = 16
test_size = 1000
ss, N, d = 3200, 10, 2

# Number of unique pairwise distances among N points.
n_dist = int(N * (N - 1) / 2)

# Random integer distance matrices, min-max scaled into [0, 1] with the
# minimum pinned at 0.
test_data = dataSource.generate_rand_DM(N,
                                        sample_size=test_size,
                                        isInt=True,
                                        sample_space=(1000, 1))
test_data = utils.minmax_norm(test_data, dmin=0)[0]


def test(helper, test_data):

    rs, target = helper._predict(test_data)
    loss = helper.lossFun(rs, target)

    return loss


def train(helper, dlr, logFilePath):

    EPOCH = 100

    print("Training ", helper.id)
Exemple #13
0
def add_noise_to_dm(x):
    """Return ``x`` perturbed by non-negative Gaussian noise, re-normalised.

    Noise magnitude scales with the mean of ``x``; the result is min-max
    scaled with its minimum pinned at 0.
    """
    scale = torch.mean(x) * 0.5
    perturbed = x + torch.abs(torch.randn(x.size()) * scale)
    return utils.minmax_norm(perturbed, dmin=0)[0]
Exemple #14
0
    def reload_custom(self, dist_func, n_arg):
        """Regenerate the test set using a custom distance function."""
        raw = dataSource.custom_distance(
            self.N, n_arg, self.test_size, isInt=True,
            sample_space=(1000, 1), dist_func=dist_func)
        # Min-max scale with the minimum pinned at 0.
        self.test_data = utils.minmax_norm(raw, dmin=0)[0]
Exemple #15
0
    def reload_rand(self):
        """Regenerate the test set with random distance matrices."""
        raw = dataSource.generate_rand_DM(
            self.N, self.test_size, isInt=True, sample_space=(1000, 1))
        # Min-max scale with the minimum pinned at 0.
        self.test_data = utils.minmax_norm(raw, dmin=0)[0]
Exemple #16
0
    R = R - numpy.tile(numpy.mean(R, axis=1), (N, 1)).T
    _, vects = linalg.eigh(R.dot(R.T))

    return vects[:, ::-1].T.dot(R).T


#%%
import torch
import utils

if __name__ == "__main__":
    # Use float64 tensors for numerical stability.
    torch.set_default_tensor_type('torch.DoubleTensor')

    # Small hand-picked 2-D point set for a smoke test.
    pts = torch.tensor([[0, 2], [2, 5], [6, 8], [8, 7], [9, 10]])

    # Normalised Euclidean distance matrix of the points.
    dm = utils.get_distanceSq_matrix(pts)[0]**0.5
    dm = utils.minmax_norm(dm)[0]
    print(dm)

    # Reconstruct coordinates with landmark MDS, then rebuild and
    # normalise the distance matrix for comparison.
    dm = numpy.array(dm)
    rs = landmarkMDS(dm, 5, 2)
    rs = torch.tensor(rs)

    rs_dm = utils.get_distanceSq_matrix(rs)[0]**0.5
    rs_dm = utils.minmax_norm(rs_dm)[0]
    print(rs_dm)

    # Total squared reconstruction error between the two matrices.
    print(torch.sum(((rs_dm - torch.tensor(dm))**2)))

# %%
Exemple #17
0
    # NOTE(review): this indented fragment appears to be the body of a
    # script/main guard whose header is outside this view.
    from torch import nn
    from mds.cmds import classicalMDS
    from lossFunction import CoordsToDMLoss
    torch.set_default_tensor_type('torch.DoubleTensor')

    # Manhattan (L1) distance between two points.
    dist_func = lambda p1, p2: torch.sum(torch.abs((p2 - p1)))

    # 1000 samples of 10 points whose distance matrices use the custom
    # (L1) distance.
    cus = custom_distance(10,
                          3,
                          1000,
                          isInt=True,
                          sample_space=(1000, 1),
                          dist_func=dist_func)

    lossFun = CoordsToDMLoss(N=10, d=2, lossFun=nn.L1Loss(reduction='mean'))

    coords, dms = cus

    # Normalise the matrices; the epsilon keeps values strictly positive.
    dms = utils.minmax_norm(dms, dmin=0)[0] + 1e-8
    dms = dms.detach().requires_grad_(True)

    cmds_rs = []

    # Solve each matrix with classical MDS (numpy-side) and collect results.
    for d in dms:

        d1 = numpy.array(d.data)
        cmds_rs.append(torch.tensor(classicalMDS(d1, 2)))

    cmds_rs = torch.stack(cmds_rs)
    print("cmds_loss: \t", lossFun(cmds_rs, dms))
Exemple #18
0
    def forward(self, rs, target):
        """Score a reconstruction against its target after normalisation.

        Both inputs are min-max scaled (minimum pinned at 0) before the
        wrapped loss function compares them.
        """
        norm_rs = utils.minmax_norm(rs, dmin=0)[0]
        norm_target = utils.minmax_norm(target, dmin=0)[0]
        return self.lossFun(norm_rs, norm_target)