def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    """Run one detection pass of *model* over *img* and post-process.

    Args:
        model: network whose forward pass returns an indexable pair of tensors.
        img: HxWxC ndarray (cv2 image) or NxHxWxC batch ndarray.
        conf_thresh: confidence threshold forwarded to post-processing.
        nms_thresh: NMS IoU threshold forwarded to post-processing.
        use_cuda: when truthy, move the input tensor to the GPU.

    Returns:
        Whatever ``utils.post_processing`` returns for the two network outputs.

    Raises:
        TypeError: if *img* is not a 3- or 4-dimensional numpy array.
    """
    model.eval()
    t0 = time.time()

    # Normalise the input to an NCHW float tensor scaled to [0, 1].
    if isinstance(img, np.ndarray) and img.ndim == 3:  # single cv2 image (HWC)
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif isinstance(img, np.ndarray) and img.ndim == 4:  # batch (NHWC)
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        # Raise instead of exit(-1): a library function must not kill the
        # interpreter; callers can catch and recover.
        raise TypeError("unknown image type: expected a 3- or 4-D numpy array")

    if use_cuda:
        img = img.cuda()

    t1 = time.time()

    # Inference only: no autograd graph is needed.  torch.autograd.Variable
    # is deprecated and was a no-op wrapper on modern PyTorch anyway.
    with torch.no_grad():
        output = model(img)

    t2 = time.time()

    print('-----------------------------------')
    print('           Preprocess : %f' % (t1 - t0))
    print('      Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')
    out0 = output[0].cpu().detach().numpy()
    out1 = output[1].cpu().detach().numpy()

    return utils.post_processing(conf_thresh, nms_thresh, out0, out1)
def main():
    """Visually inspect augmented VOC samples with their decoded boxes."""
    # Augmentation pipeline: flip, bbox-safe crop to 448x448, colour shift,
    # blur, brightness/contrast and CLAHE.
    aug = get_aug([
        A.HorizontalFlip(p=.5),
        A.RandomSizedBBoxSafeCrop(width=448,
                                  height=448,
                                  erosion_rate=0,
                                  interpolation=cv2.INTER_CUBIC),
        A.RGBShift(p=.5),
        A.Blur(blur_limit=5, p=0.5),
        A.RandomBrightnessContrast(p=0.5),
        A.CLAHE(p=0.5),
    ])

    voc = VOCDataset(cfg.DATASET_PATH,
                     classes_list=cfg.CLASSES,
                     image_set='train',
                     transforms=aug)

    for sample_idx in range(1000):
        image, out = voc[sample_idx]

        decoded = post_processing(out)
        size = image.shape[0]
        for box in decoded:
            # Boxes are normalised (cx, cy, w, h); convert to pixel corners.
            cx, cy, w, h = box[0], box[1], box[2], box[3]
            top_left = (int((cx - w / 2) * size), int((cy - h / 2) * size))
            bottom_right = (int((cx + w / 2) * size), int((cy + h / 2) * size))

            cv2.rectangle(image, top_left, bottom_right, (255, 33, 44))

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow("test", image)
        cv2.waitKeyEx(-1)
Exemple #3
0
def create_dataset():
    """Render a dataset with headless Blender driven by /root/config.json.

    Reads the JSON config, exports every key/value pair as an environment
    variable (so the Blender-side script can pick them up), runs Blender in
    background mode with the rendering script, then post-processes the output.

    Raises:
        subprocess.CalledProcessError: if Blender exits with a non-zero code.
    """
    import subprocess  # local import: only needed by this function

    # Read the config file.
    with open("/root/config.json", "r") as f:
        config = json.load(f)

    # Expose every config entry to the Blender subprocess via the environment.
    for (key, value) in config.items():
        os.environ[key] = str(value)

    # Run Blender.  An argument list with shell=False (the subprocess.run
    # default) avoids the shell-quoting pitfalls of the previous os.system()
    # string; check=True surfaces failures instead of silently ignoring the
    # return code.
    subprocess.run([
        '/usr/lib/blender/blender', '/root/models/default.blend',
        '--python', '/root/rendering.py', '--background'
    ], check=True)

    # post processing
    post_processing()
Exemple #4
0
    def train(self, epoch, trainloader, print_every=100):
        '''
        Run one training epoch and accumulate detection metrics.

        Args:
            epoch: current epoch index (drives LR decay and logging).
            trainloader: yields (data, labels, actual_centers) batches.
            print_every: log the running loss every this many batches.
        '''
        self.model.train()
        loss_batch = 0
        # Decay the learning rate by 10x every 100 epochs (skip epoch 0).
        if epoch % 100 == 0 and epoch > 0:
            self.adjust_lr(step=0.1)
        TP, FP, FN, TN = 0, 0, 0, 0
        for b_idx, (train_data, train_labels, actual_centers) in enumerate(trainloader):
            if self.use_gpu:
                train_data = train_data.cuda(non_blocking=True)
                train_labels = train_labels.cuda()

            output = self.model(train_data)
            loss = self.loss(output, train_labels)

            if self.l2:
                loss = self.l2_regularization(loss, self.l2)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            output = output.cpu().detach().squeeze()
            # squeeze() drops the batch dimension when the batch size is 1;
            # restore it so post_processing always sees a 3-D array.
            if len(output.shape) < 3:
                output = output.unsqueeze(0)
            _, predicted_centers, maps_area = post_processing(output.numpy(), self.threshold)

            TP_t, FP_t, TN_t, FN_t = tp_fp_tn_fn_alt(actual_centers, predicted_centers, maps_area, self.min_radius)

            TP += TP_t
            FP += FP_t
            FN += FN_t
            TN += TN_t

            # Bug fix: use the print_every parameter instead of the global
            # ``opt.print_every``, which made the argument dead and depended
            # on a module-level object.
            if b_idx % print_every == 0:
                print('Train Epoch: {0} [{1}/{2} ({3:.0f}%)]\t Loss {4}'.
                      format(epoch, b_idx * len(train_data),
                             len(trainloader.dataset),
                             100. * b_idx / len(trainloader), loss))

            loss_ = loss.item()
            self.iter_loss_train.append(loss_)
            loss_batch += loss_

        FDR_train, RC_train, accuracy_train = performance_metric(TP, FP, FN, TN)
        self.fdr_train.append(FDR_train)
        self.accuracy_train.append(accuracy_train)
        self.RC_train.append(RC_train)

        loss_batch /= len(trainloader)

        print('Epoch = {} Train TP {} FP {} TN {} FN {} '.format(epoch, TP, FP, TN, FN))
        print('Train loss = {0} FDR = {1:.4f} , RC {2:.4f} =, accuracy = {3:.4f}'.format(loss_batch, FDR_train, RC_train, accuracy_train))
        self.train_loss.append(loss_batch)
Exemple #5
0
def do_detect(model, img, conf_thresh, n_classes, nms_thresh, use_cuda=1):
    """Run detection on *img* and return ``utils.post_processing``'s result.

    Args:
        model: network whose forward pass returns a sequence of heads, each
            indexable for at least three tensors.
        img: PIL image, HxWxC ndarray (cv2 image) or NxHxWxC ndarray batch.
        conf_thresh: confidence threshold for post-processing.
        n_classes: number of classes, forwarded to post-processing.
        nms_thresh: NMS IoU threshold for post-processing.
        use_cuda: when truthy, run inference on the GPU.

    Raises:
        TypeError: if *img* is none of the supported types.
    """
    model.eval()
    t0 = time.time()

    # Normalise the input to an NCHW float tensor scaled to [0, 1].
    if isinstance(img, Image.Image):
        width = img.width
        height = img.height
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
        img = img.view(height, width,
                       3).transpose(0, 1).transpose(0, 2).contiguous()
        img = img.view(1, 3, height, width)
        img = img.float().div(255.0)
    elif isinstance(img, np.ndarray) and img.ndim == 3:  # cv2 image (HWC)
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif isinstance(img, np.ndarray) and img.ndim == 4:  # batch (NHWC)
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        # Raise instead of exit(-1): a library function must not kill the
        # interpreter; callers can catch and recover.
        raise TypeError(
            "unknown image type: expected PIL image or 3-/4-D numpy array")

    if use_cuda:
        img = img.cuda()

    t1 = time.time()

    # Inference only: skip autograd bookkeeping (torch.autograd.Variable is
    # deprecated and was a no-op wrapper on modern PyTorch anyway).
    with torch.no_grad():
        boxes_and_confs = model(img)

    # Convert the first three tensors of every head to numpy arrays.
    output = [[head[0].cpu().detach().numpy(),
               head[1].cpu().detach().numpy(),
               head[2].cpu().detach().numpy()]
              for head in boxes_and_confs]

    t2 = time.time()

    print('-----------------------------------')
    print('          Preprocess : %f' % (t1 - t0))
    print('     Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')

    return utils.post_processing(img, conf_thresh, n_classes, nms_thresh,
                                 output)
Exemple #6
0
def testing(net, test_loader, device):
    """Run *net* over *test_loader* and collect post-processed predictions.

    Args:
        net: model to evaluate (switched to eval mode here).
        test_loader: iterable of sample dicts with at least the keys
            'data', 'vocal_pitch' and 'range'.
        device: torch device the input data is moved to.

    Returns:
        list: post-processed predictions accumulated over all batches.
    """
    net.eval()
    predict = []
    # Unused locals from the original ('idx', 'data_lens', 'data_length')
    # have been dropped; the sample keys they read are still available.
    for sample in test_loader:
        data = torch.Tensor(sample['data'])
        vocal_pitch = sample['vocal_pitch']
        _range = sample['range']

        data = data.to(device, dtype=torch.float)
        output = net(data)
        answer = post_processing(output, vocal_pitch, _range)
        predict.extend(answer)
        # Progress indicator: running total of collected predictions.
        print(len(predict))

    return predict
    def predict_with_model(self, model_name):
        """Load a saved model, predict on the test set and post-process.

        Args:
            model_name: file name of the model under ``../Models/``.

        Returns:
            list: one post-processed prediction per test sample.
        """
        model = load_model('../Models/' + model_name,
                           custom_objects={
                               'binary_crossentropy_valid':
                               binary_crossentropy_valid
                           })

        test_images = self.dataset.test.copy()
        # The network expects an explicit trailing channel axis.
        if len(test_images.shape) == 3:
            test_images = np.expand_dims(test_images, -1)

        # Keep only the first output channel of every prediction map.
        probability_maps = model.predict(test_images)[:, :, :, 0]

        # Map each prediction back to its original geometry, then post-process.
        return [
            post_processing(square_to_original(probability_maps[i, ],
                                               self.dataset.test_shape[i, ]))
            for i in range(probability_maps.shape[0])
        ]
def real_time_lrp(conf):
    """Method to display feature relevance scores in real time.

    Args:
        conf: Dictionary consisting of configuration parameters.
    """
    record_video = conf["playback"]["record_video"]

    webcam = Webcam()
    lrp = RelevancePropagation(conf)

    # Only instantiate the recorder when recording was requested.
    recorder = VideoRecorder(conf) if record_video else None

    while True:
        tic = time.time()

        frame = webcam.get_frame()
        heatmap = post_processing(frame, lrp.run(frame), conf)
        cv2.imshow("LRP", heatmap)

        if recorder is not None:
            recorder.record(heatmap)

        # Report throughput for this iteration.
        elapsed = time.time() - tic
        print("{:.1f} FPS".format(1.0 / elapsed))

        # ESC (keycode 27) stops the loop.
        if cv2.waitKey(1) % 256 == 27:
            print("Escape pressed.")
            break

    if recorder is not None:
        recorder.release()

    webcam.turn_off()
    cv2.destroyAllWindows()
Exemple #9
0
    def test(self, epoch, testloader):
        '''
        Evaluate the model for one epoch without gradient tracking.

        Args:
            epoch: current epoch index (logging only).
            testloader: yields (data, labels, actual_centers) batches.
        '''
        self.model.eval()
        TP, FP, FN, TN = 0, 0, 0, 0
        with torch.no_grad():
            batch_loss = 0
            for test_data, test_labels, actual_centers in testloader:
                if self.use_gpu:
                    test_data, test_labels = test_data.cuda(), test_labels.cuda()
                output = self.model(test_data)
                # Bug fix: take .item() so Python floats (not 0-d tensors)
                # are accumulated and stored, matching the train() method.
                loss_ = self.loss(output, test_labels).item()
                output = output.cpu().squeeze()
                # squeeze() drops the batch dimension when the batch size is
                # 1; restore it so post_processing always sees a 3-D array.
                if len(output.shape) < 3:
                    output = output.unsqueeze(0)

                _, predicted_centers, maps_area = post_processing(output.numpy(), self.threshold)
                TP_test, FP_test, TN_test, FN_test = tp_fp_tn_fn_alt(actual_centers, predicted_centers, maps_area, self.min_radius)
                TP += TP_test
                FP += FP_test
                FN += FN_test
                TN += TN_test
                self.iter_loss_test.append(loss_)
                batch_loss += loss_

            batch_loss /= len(testloader)
            FDR_test, RC_test, accuracy_test = performance_metric(TP, FP, FN, TN)

            self.fdr_test.append(FDR_test)
            self.accuracy_test.append(accuracy_test)
            self.RC_test.append(RC_test)

            print('epoch {} Test TP {} FP {} TN {} FN {}'.format(epoch, TP, FP, TN, FN))
            print('Test loss = {0} FDR = {1:.4f} , RC {2:.4f} =, accuracy = {3:.4f}'.format(batch_loss, FDR_test, RC_test, accuracy_test))

            self.test_loss.append(batch_loss)
                test_net.train()
                
                if count>=steps:
                    return
            
            
# Show the current figure, then display the style image used for training.
plt.show()
plt.imshow(utils.get_numpy_image_to_plot(style_img_tensor.cpu().detach().numpy())[0])

# Run training; presumably train(start, steps) — TODO confirm against the
# train() definition (not visible here).
train(0,100)

# Model saving,reloading

#torch.save(test_net.state_dict(), "saved_model")

# the_model = TheModelClass(*args, **kwargs)
# the_model.load_state_dict(torch.load(PATH))

#for testing
# Grab a single batch of content images from the loader, move it to the
# device, and stop after the first batch.
for x, _ in test_train_loader:
    test_new_content = x.to(device)
    break

# Stylise the batch and plot the input and the output (first image of each).
img_output = test_net(test_new_content)
plt.imshow(utils.get_numpy_image_to_plot(test_new_content.cpu().detach().numpy())[0])

plt.imshow(utils.get_numpy_image_to_plot(img_output.cpu().detach().numpy())[0])

# NOTE(review): utils.post_processing() is called with no arguments here,
# unlike every other call site in this file — verify its signature.
history = utils.post_processing()

        'Model Comparison of Decoding Probability of Success_RT_features.png'),
                  dpi=500,
                  bbox_inches='tight')

    id_vars = [
        'model',
        'score',
        'sub',
        'window',
    ]
    value_vars = [
        'RT_correct',
        'RT_awareness',
        'RT_confidence',
    ]
    df_post = post_processing(df, id_vars, value_vars)
    c = df_post.groupby(['Subjects', 'Models', 'Window',
                         'Attributes']).mean().reset_index()
    g = sns.catplot(x='Window',
                    y='Values',
                    hue='Attributes',
                    hue_order=value_vars,
                    row='Models',
                    row_order=['DecisionTreeClassifier', 'LogisticRegression'],
                    data=c,
                    aspect=3,
                    sharey=False,
                    dodge=0.1,
                    kind='point')
    (g.set_axis_labels(
        'Trials look back', ''
Exemple #12
0
                                             config.direction,
                                             config.cell_type,
                                             str(config.layers))
    config.model_dir = join(config.output_dir, config.model_name)

    if not exists(config.output_dir):
        mkdir(config.output_dir)
    if not exists(config.model_dir):
        mkdir(config.model_dir)

    model = RNN_Model(config)

    if config.testing:
        test_ids, test_data, _ = load_timit(config, data_set='test')
        test_ids = np.expand_dims(test_ids, 1)

        predictions = model.test(test_data)
        predictions = np.expand_dims(
            post_processing(config, predictions, threshold=2), 1)

        outputs = np.append(test_ids, predictions, axis=1)
        df = pd.DataFrame(outputs).to_csv(join(
            config.model_dir, '{}.csv'.format(config.model_name)),
                                          index=False,
                                          header=['id', 'phone_sequence'])

    else:
        train_ids, train_data, train_labels = load_timit(config,
                                                         data_set='train')
        model.train(train_data, train_labels)
Exemple #13
0
def main(args):
    """Train or evaluate a time-series GAN depending on ``args.mode``.

    In 'train' mode a discriminator/generator pair is optimised with either
    an L2 (LSGAN-style) or WGAN-GP objective.  In 'test' mode the saved
    generator is loaded and used to synthesise series that are plotted,
    written to CSV and scored against the real data.

    Raises:
        NotImplementedError: for unknown model, criterion, optimizer or mode.
    """
    # Device Configuration #
    device = torch.device(
        f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')

    # Fix Seed for Reproducibility #
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Samples, Plots, Weights and CSV Path #
    paths = [
        args.samples_path, args.weights_path, args.csv_path,
        args.inference_path
    ]
    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = pd.read_csv(args.data_path)[args.column]

    # Scale / transform the raw series, then split and window it #
    scaler_1 = StandardScaler()
    scaler_2 = StandardScaler()
    preprocessed_data = pre_processing(data, scaler_1, scaler_2, args.constant,
                                       args.delta)

    train_X, train_Y, test_X, test_Y = prepare_data(data, preprocessed_data,
                                                    args)

    train_X = moving_windows(train_X, args.ts_dim)
    train_Y = moving_windows(train_Y, args.ts_dim)

    test_X = moving_windows(test_X, args.ts_dim)
    test_Y = moving_windows(test_Y, args.ts_dim)

    # Prepare Networks #
    if args.model == 'conv':
        D = ConvDiscriminator(args.ts_dim).to(device)
        G = ConvGenerator(args.latent_dim, args.ts_dim).to(device)

    elif args.model == 'lstm':
        D = LSTMDiscriminator(args.ts_dim).to(device)
        G = LSTMGenerator(args.latent_dim, args.ts_dim).to(device)

    else:
        raise NotImplementedError

    #########
    # Train #
    #########

    if args.mode == 'train':

        # Loss Function #
        if args.criterion == 'l2':
            criterion = nn.MSELoss()

        elif args.criterion == 'wgangp':
            # WGAN-GP uses the mean critic outputs directly; no nn loss.
            pass

        else:
            raise NotImplementedError

        # Optimizers #
        if args.optim == 'sgd':
            D_optim = torch.optim.SGD(D.parameters(), lr=args.lr, momentum=0.9)
            G_optim = torch.optim.SGD(G.parameters(), lr=args.lr, momentum=0.9)

        elif args.optim == 'adam':
            D_optim = torch.optim.Adam(D.parameters(),
                                       lr=args.lr,
                                       betas=(0., 0.9))
            G_optim = torch.optim.Adam(G.parameters(),
                                       lr=args.lr,
                                       betas=(0., 0.9))

        else:
            raise NotImplementedError

        D_optim_scheduler = get_lr_scheduler(D_optim, args)
        G_optim_scheduler = get_lr_scheduler(G_optim, args)

        # Lists #
        D_losses, G_losses = list(), list()

        # Train #
        print(
            "Training Time Series GAN started with total epoch of {}.".format(
                args.num_epochs))

        for epoch in range(args.num_epochs):

            # Initialize Optimizers #
            G_optim.zero_grad()
            D_optim.zero_grad()

            #######################
            # Train Discriminator #
            #######################

            # WGAN-GP trains the critic several times per generator step.
            if args.criterion == 'l2':
                n_critics = 1
            elif args.criterion == 'wgangp':
                n_critics = 5

            for j in range(n_critics):
                series, start_dates = get_samples(train_X, train_Y,
                                                  args.batch_size)

                # Data Preparation #
                series = series.to(device)
                noise = torch.randn(args.batch_size, 1,
                                    args.latent_dim).to(device)

                # Adversarial Loss using Real Image #
                prob_real = D(series.float())

                if args.criterion == 'l2':
                    real_labels = torch.ones(prob_real.size()).to(device)
                    D_real_loss = criterion(prob_real, real_labels)

                elif args.criterion == 'wgangp':
                    D_real_loss = -torch.mean(prob_real)

                # Adversarial Loss using Fake Image #
                fake_series = G(noise)
                prob_fake = D(fake_series.detach())

                if args.criterion == 'l2':
                    fake_labels = torch.zeros(prob_fake.size()).to(device)
                    D_fake_loss = criterion(prob_fake, fake_labels)

                elif args.criterion == 'wgangp':
                    D_fake_loss = torch.mean(prob_fake)
                    # Bug fix: keep the raw gradient penalty here.  The
                    # original multiplied by args.lambda_gp both at this line
                    # and again when adding to D_loss below, squaring the
                    # coefficient.  lambda_gp is now applied exactly once.
                    D_gp_loss = get_gradient_penalty(
                        D, series.float(), fake_series.float(), device)

                # Calculate Total Discriminator Loss #
                D_loss = D_fake_loss + D_real_loss

                if args.criterion == 'wgangp':
                    D_loss += args.lambda_gp * D_gp_loss

                # Back Propagation and Update #
                D_loss.backward()
                D_optim.step()

            ###################
            # Train Generator #
            ###################

            # Adversarial Loss #
            fake_series = G(noise)
            prob_fake = D(fake_series)

            # Calculate Total Generator Loss #
            if args.criterion == 'l2':
                real_labels = torch.ones(prob_fake.size()).to(device)
                G_loss = criterion(prob_fake, real_labels)

            elif args.criterion == 'wgangp':
                G_loss = -torch.mean(prob_fake)

            # Back Propagation and Update #
            G_loss.backward()
            G_optim.step()

            # Add items to Lists #
            D_losses.append(D_loss.item())
            G_losses.append(G_loss.item())

            # Adjust Learning Rate #
            D_optim_scheduler.step()
            G_optim_scheduler.step()

            # Print Statistics, Save Model Weights and Series #
            if (epoch + 1) % args.log_every == 0:

                # Print Statistics and Save Model #
                print("Epochs [{}/{}] | D Loss {:.4f} | G Loss {:.4f}".format(
                    epoch + 1, args.num_epochs, np.average(D_losses),
                    np.average(G_losses)))
                torch.save(
                    G.state_dict(),
                    os.path.join(
                        args.weights_path,
                        'TS_using{}_and_{}_Epoch_{}.pkl'.format(
                            G.__class__.__name__, args.criterion.upper(),
                            epoch + 1)))

                # Generate Samples and Save Plots and CSVs #
                series, fake_series = generate_fake_samples(
                    test_X, test_Y, G, scaler_1, scaler_2, args, device)
                plot_series(series, fake_series, G, epoch, args,
                            args.samples_path)
                make_csv(series, fake_series, G, epoch, args, args.csv_path)

    ########
    # Test #
    ########

    elif args.mode == 'test':

        # Load Model Weights #
        G.load_state_dict(
            torch.load(
                os.path.join(
                    args.weights_path, 'TS_using{}_and_{}_Epoch_{}.pkl'.format(
                        G.__class__.__name__, args.criterion.upper(),
                        args.num_epochs))))

        # Lists #
        real, fake = list(), list()

        # Inference #
        for idx in range(0, test_X.shape[0], args.ts_dim):

            # Do not plot if the remaining data is less than time dimension #
            end_ix = idx + args.ts_dim

            if end_ix > len(test_X) - 1:
                break

            # Prepare Data #
            test_data = test_X[idx, :]
            test_data = np.expand_dims(test_data, axis=0)
            test_data = np.expand_dims(test_data, axis=1)
            test_data = torch.from_numpy(test_data).to(device)

            start = test_Y[idx, 0]

            noise = torch.randn(args.val_batch_size, 1,
                                args.latent_dim).to(device)

            # Generate Fake Data #
            with torch.no_grad():
                fake_series = G(noise)

            # Convert to Numpy format for Saving #
            test_data = np.squeeze(test_data.cpu().data.numpy())
            fake_series = np.squeeze(fake_series.cpu().data.numpy())

            test_data = post_processing(test_data, start, scaler_1, scaler_2,
                                        args.delta)
            fake_series = post_processing(fake_series, start, scaler_1,
                                          scaler_2, args.delta)

            real += test_data.tolist()
            fake += fake_series.tolist()

        # Plot, Save to CSV file and Derive Metrics #
        plot_series(real, fake, G, args.num_epochs - 1, args,
                    args.inference_path)
        make_csv(real, fake, G, args.num_epochs - 1, args, args.inference_path)
        derive_metrics(real, fake, args)

    else:
        raise NotImplementedError
Exemple #14
0
def do_training(on_net, off_net, train_loader, val_loader, device):
    """Train the onset and offset networks jointly and keep the best weights.

    Both networks are optimised together with BCE losses on their respective
    target channels.  From epoch 10 onward, transcription metrics are computed
    on train and validation data, the LR scheduler steps on the weighted F1,
    and the best-scoring weights are saved to disk.

    Args:
        on_net: onset-prediction network.
        off_net: offset-prediction network.
        train_loader: yields sample dicts with 'data', 'label', 'data_lens',
            'vocal_pitch', 'range' and 'groundtruth' entries.
        val_loader: same structure, used for validation.
        device: torch device to run on.

    Returns:
        tuple: (on_net, off_net) after training.
    """
    num_epoch = 60
    criterion_set = nn.BCELoss()
    #criterion_pitch = nn.SmoothL1Loss()
    #criterion_pitch = nn.CrossEntropyLoss()
    best_f1 = 0
    # (Removed unused best_val_loss and redundant pre-loop initialisations of
    # train_loss/total_length: both are reset at the top of every epoch.)

    optimizer = optim.AdamW(list(on_net.parameters()) +
                            list(off_net.parameters()),
                            lr=2e-3)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'max',
                                                     factor=0.5,
                                                     patience=4)

    for epoch in range(num_epoch):
        total_length = 0.0
        start_time = time.time()
        train_loss = 0.0
        total_onset_loss = 0.0
        total_offset_loss = 0.0

        COn = 0
        COnP = 0
        COnPOff = 0
        weighted_f1 = 0
        count = 0
        for param_group in optimizer.param_groups:
            print("lr: {}".format(param_group['lr']))

        on_net.train()
        off_net.train()
        for batch_idx, sample in enumerate(train_loader):

            data = sample['data']
            target = sample['label']
            data_lens = sample['data_lens']
            vocal_pitch = sample['vocal_pitch']
            _range = sample['range']

            data_length = list(data.shape)[0]

            data = data.to(device, dtype=torch.float)
            target = target.to(device, dtype=torch.float)

            optimizer.zero_grad()
            on_output = on_net(data)
            off_output = off_net(data)

            # Channel 0 of the target is onsets, channel 1 is offsets.
            on_loss = criterion_set(
                on_output, torch.narrow(target, dim=2, start=0, length=1))
            off_loss = criterion_set(
                off_output, torch.narrow(target, dim=2, start=1, length=1))
            loss = on_loss + off_loss
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            total_length = total_length + 1
            output = torch.cat([on_output, off_output], dim=2)
            # Metrics are only evaluated after a warm-up of 10 epochs.
            if epoch >= 10:
                gt = np.array(sample['groundtruth'])
                predict = post_processing(output, vocal_pitch, _range)

                for i in range(len(predict)):
                    count += 1
                    # Clamp non-positive values so mir_eval never sees them.
                    tmp_predict = np.where(
                        np.array(predict[i]) > 0, np.array(predict[i]), 0.0001)
                    if tmp_predict.shape[0] == 0:
                        continue
                    score = mir_eval.transcription.evaluate(
                        gt[i][:, :2], gt[i][:, 2], tmp_predict[:, :2],
                        tmp_predict[:, 2])
                    COn = COn + score['Onset_F-measure']
                    COnP = COnP + score['F-measure_no_offset']
                    COnPOff = COnPOff + score['F-measure']
                    weighted_f1 = COn * 0.2 + COnP * 0.6 + COnPOff * 0.2

        print('epoch %d, total loss: %.6f, training time: %.3f sec' %
              (epoch, train_loss / total_length, time.time() - start_time))

        if epoch >= 10:
            print(
                "epoch %d, COn %.6f, COnP %.6f, COnPOff %.6f, Training weighted_f1 %.6f"
                % (epoch, COn / count, COnP / count, COnPOff / count,
                   weighted_f1 / count))

        # evaluate
        if epoch >= 10:
            val_total_loss = 0
            COn = 0
            COnP = 0
            COnPOff = 0
            weighted_f1 = 0
            count = 0
            on_net.eval()
            off_net.eval()
            for idx, sample in enumerate(val_loader):
                data = torch.Tensor(sample['data'])
                target = torch.Tensor(sample['label'])
                data_lens = sample['data_lens']
                vocal_pitch = sample['vocal_pitch']
                _range = sample['range']

                data_length = list(data.shape)[0]

                data = data.to(device, dtype=torch.float)
                target = target.to(device, dtype=torch.float)

                on_output = on_net(data)
                off_output = off_net(data)

                on_loss = criterion_set(
                    on_output, torch.narrow(target, dim=2, start=0, length=1))
                off_loss = criterion_set(
                    off_output, torch.narrow(target, dim=2, start=1, length=1))
                set_loss = on_loss + off_loss

                val_total_loss += set_loss.item()

                output = torch.cat([on_output, off_output], dim=2)
                predict = post_processing(output, vocal_pitch, _range)

                gt = np.array(sample['groundtruth'])

                for i in range(len(predict)):
                    count += 1
                    tmp_predict = np.where(
                        np.array(predict[i]) > 0, np.array(predict[i]), 0.0001)
                    if tmp_predict.shape[0] == 0:
                        continue
                    score = mir_eval.transcription.evaluate(
                        gt[i][:, :2], gt[i][:, 2], tmp_predict[:, :2],
                        tmp_predict[:, 2])
                    COn = COn + score['Onset_F-measure']
                    COnP = COnP + score['F-measure_no_offset']
                    COnPOff = COnPOff + score['F-measure']
                    weighted_f1 = COn * 0.2 + COnP * 0.6 + COnPOff * 0.2

            # NOTE(review): weighted_f1 here is a cumulative sum, not the
            # per-sample average — confirm this is the intended scheduler
            # signal before changing it.
            scheduler.step(weighted_f1)

            if weighted_f1 > best_f1:
                best_f1 = weighted_f1
                torch.save(on_net.state_dict(), './onset_model.pkl')
                torch.save(off_net.state_dict(), './offset_model.pkl')

            if epoch >= 10:
                # NOTE(review): total_length is the *training* batch count;
                # presumably the validation batch count was intended here.
                print('epoch %d, total val loss: %.6f ' %
                      (epoch, val_total_loss / total_length))

                print(
                    "epoch %d, COn %.6f, COnP %.6f, COnPOff %.6f, Validation weighted_f1 %.6f"
                    % (epoch, COn / count, COnP / count, COnPOff / count,
                       weighted_f1 / count))

    # Bug fix: the original did `return net`, which raised NameError since no
    # `net` exists in this scope.  Return both trained networks instead.
    return on_net, off_net
Exemple #15
0
def generate_timeseries(args):
    """Synthesise time series with a trained generator and save plots/CSV.

    Loads the saved SkipGenerator weights, walks the input series in
    non-overlapping windows, conditions each generated window on the first
    ``conditional_dim`` real values, and writes the resulting real/fake
    series to a plot and a CSV file.

    Raises:
        NotImplementedError: if ``args.model`` is not 'skip'.
    """
    # Device configuration.
    device = torch.device(
        f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')

    # Output directory for inference artefacts.
    make_dirs(args.inference_path)

    # Build the generator and restore its trained weights (guard clause
    # instead of if/else: only the 'skip' architecture is supported).
    if args.model != 'skip':
        raise NotImplementedError
    G = SkipGenerator(args.latent_dim, args.ts_dim,
                      args.conditional_dim).to(device)
    weights_file = os.path.join(
        args.weights_path,
        'TimeSeries_Generator_using{}_Epoch_{}.pkl'.format(
            args.criterion.upper(), args.num_epochs))
    G.load_state_dict(torch.load(weights_file))

    # Load the raw series and fit the two scalers via pre-processing.
    raw_series = pd.read_csv(args.data_path)[args.column]

    scaler_1 = StandardScaler()
    scaler_2 = StandardScaler()

    transformed = pre_processing(raw_series, scaler_1, scaler_2, args.delta)

    windows = moving_windows(transformed, args.ts_dim)
    labels = moving_windows(raw_series.to_numpy(), args.ts_dim)

    real, fake = [], []

    # Inference: step through the series one ts_dim-sized window at a time.
    for start_idx in range(0, raw_series.shape[0], args.ts_dim):

        # Stop once less than a full window of data remains.
        if start_idx + args.ts_dim > len(raw_series) - 1:
            break

        # Shape the window as (1, 1, ts_dim) for the network.
        window = torch.from_numpy(
            windows[start_idx, :][np.newaxis, np.newaxis, :]).to(device)
        first_label = labels[start_idx, 0]

        latent = torch.randn(args.val_batch_size, 1,
                             args.latent_dim).to(device)

        with torch.no_grad():
            generated = G(latent)
        # Condition the generated series on the leading real values.
        generated = torch.cat(
            (window[:, :, :args.conditional_dim].float(), generated.float()),
            dim=2)

        window_np = np.squeeze(window.cpu().data.numpy())
        generated_np = np.squeeze(generated.cpu().data.numpy())

        real += post_processing(window_np, first_label, scaler_1, scaler_2,
                                args.delta).tolist()
        fake += post_processing(generated_np, first_label, scaler_1, scaler_2,
                                args.delta).tolist()

    plot_sample(real, fake, args.num_epochs - 1, args)
    make_csv(real, fake, args.num_epochs - 1, args)
Exemple #16
0
 #g = sns.catplot(     x       = 'window',
 #                     y       = 'score',
 #                     hue     = 'model',
 #                     data    = df,
 #                     aspect  = 3,
 #                     kind    = 'point',
 #    hue_order = ['DecisionTreeClassifier','LogisticRegression'],
 #                     ci      = 95)
 (g.set_axis_labels(
     'Trials look back', 'Clasifi.Score (AUC ROC)').fig.suptitle(
         'Model Comparison of Decoding Probability of Success'))
 g.fig.savefig(os.path.join(
     saving_dir, 'Model Comparison of Decoding Probability of Success.png'),
               dpi=500,
               bbox_inches='tight')
 df_post = post_processing(df)
 g = sns.factorplot(
     x='Window',
     y='Values',
     hue='Attributes',
     row='Models',
     row_order=['DecisionTreeClassifier', 'LogisticRegression'],
     data=df_post,
     aspect=3,
     sharey=False,
     dodge=0.1)
 # for seaborn 0.9.0
 #g = sns.catplot(      x       = 'window',
 #                      y       = 'value',
 #                      hue     = 'Attributions',
 #                      row     = 'model',
Exemple #17
0
 pos = pd.read_csv('../results/Pos_3_1_features.csv')
 att = pd.read_csv('../results/ATT_3_1_features.csv')
 # pos
 df = pos.copy()
 id_vars = [
     'model',
     'score',
     'sub',
     'window',
 ]
 value_vars = [
     'correct',
     'awareness',
     'confidence',
 ]
 df_post = post_processing(df[(df['window'] > 0) & (df['window'] < 5)],
                           id_vars, value_vars)
 c = df_post.groupby(['Subjects', 'Models', 'Window',
                      'Attributes']).mean().reset_index()
 # interaction
 level_window = pd.unique(c['Window'])
 level_attribute = pd.unique(c['Attributes'])
 unique_levels = []
 for w in level_window:
     for a in level_attribute:
         unique_levels.append([w, a])
 results = []
 for model_name, df_sub in c.groupby(['Models']):
     # main effect of window
     factor = 'Window'
     result = posthoc_multiple_comparison_scipy(
         df_sub,
Exemple #18
0
                        help="the image to predict (default: %(default)s)")

    parser.add_argument("--weight", required=True, metavar="/path/to/yolov4.weights", help="the path of weight file")

    parser.add_argument("--save-img", metavar="predicted-img", help="the path to save predicted image")

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    args = parse_args()

    # Load the input image and resize it to the network input size 608x608.
    source_image = Image.open(args.img_file).resize((608, 608))

    # C*H*W tensor.
    image_tensor = to_image(source_image)

    # Build the network from the tensor's channel count and load its weights.
    net = Darknet(image_tensor.size(0))
    net.load_weights(args.weight)
    net.eval()

    with torch.no_grad():
        boxes, confs = net(image_tensor.unsqueeze(0))
        idxes_pred, boxes_pred, probs_pred = utils.post_processing(
            boxes, confs, 0.4, 0.6)

    # Plot the predicted boxes on the image file (optionally saving them).
    utils.plot_box(boxes_pred, args.img_file, args.save_img)
    vgg.build(noise_img)

    noise_layers_list = dict({0: vgg.conv1_1, 1: vgg.conv1_2, 2: vgg.pool1,
                              3: vgg.conv2_1, 4: vgg.conv2_2, 5: vgg.pool2,
                              6: vgg.conv3_1, 7: vgg.conv3_2, 8: vgg.conv3_3, 9: vgg.pool3,
                              10: vgg.conv4_1, 11: vgg.conv4_2, 12: vgg.conv4_3, 13: vgg.pool4,
                              14: vgg.conv5_1, 15: vgg.conv5_2, 16: vgg.conv5_3, 17: vgg.pool5})

    # we define the same weight for each layer
    m = [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1),(6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1),
         (13, 1)]
    # m = [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1),
    #     (13, 1), (14, 1), (15, 1), (16, 1), (17, 1)]

    loss = loss_function(m, feature_map, noise_layers_list)
    optimizer = tf.train.AdamOptimizer().minimize(loss)
    epochs = 10000

    # init_image = keras.backend.eval(noise_img)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        init_image = sess.run(noise_img)
        for i in range(epochs):
            _, s_loss = sess.run([optimizer, loss])
            if (i+1)%100 == 0:
                print("Epoch:{} / {}".format(i+1, epochs), "Loss:", s_loss)
        final_noise = sess.run(noise_img)

    init_noise = utils.post_processing(init_image, output_img, save_file=False)
    final_image = utils.post_processing(final_noise, output_img, save_file=True)