Example #1
0
import os
import logging

from flask import Flask

import config
from utils import Log

# Flask application object; the session secret comes from the project config.
app = Flask(__name__)
app.secret_key = config.secret_key

# Ensure the log directory exists. makedirs(..., exist_ok=True) is race-free
# and also creates missing parents, unlike the original check-then-mkdir.
os.makedirs(config.logs_dir, exist_ok=True)

file_handler = logging.FileHandler(config.log_config.get("filename"))
file_handler.setLevel(config.log_config.get("level"))

# Handler.setFormatter() requires a logging.Formatter instance. If the config
# stores a plain format *string* (as the "format" key suggests), passing it
# directly would raise at the first emitted record — wrap it defensively.
_fmt = config.log_config.get("format")
if not isinstance(_fmt, logging.Formatter):
    _fmt = logging.Formatter(_fmt)
file_handler.setFormatter(_fmt)

log = Log(file_handler, "Tag API")
log.info("Tag API started.")

# Imported late, after `app` exists, so blueprint modules can import `app`
# from this module without a circular-import failure.
import blueprints
Example #2
0
                device), batchs['label'].to(device)
            out = net(sig, other)
            f1, accuracy, recall = f1_score(out.cpu().data.numpy(),
                                            label.cpu().data.numpy())
            f1_all.append(f1)
            acc_all.append(accuracy)
            recall_all.append(recall)
            log.logging_flush(f'      valid         b:{i}/{len(dataloader)}  ')
        f1_all, acc_all, recall_all = np.mean(f1_all), np.mean(
            acc_all), np.mean(recall_all)
        log.logging(f'f1:{f1_all}  acc:{acc_all}  recall:{recall_all}')
        return f1_all


if __name__ == '__main__':
    # Training entry point: set up logging, pick the compute device,
    # split the dataset 80/20 and build the training DataLoader.
    log = Log(save_dir)

    # Prefer the GPU when available; fall back to CPU silently.
    if torch.cuda.is_available():
        device = torch.device("cuda")
        log.logging('using cuda backend.')
    else:
        device = torch.device("cpu")

    # Contiguous (not shuffled) 80/20 train/validation split over indices.
    all_split = np.arange(len(train_set))
    train_split, val_split = all_split[:int(len(all_split) * 0.8)], all_split[
        int(len(all_split) * 0.8):]

    # NOTE(review): RandomSampler(train_split) draws positions
    # 0..len(train_split)-1 of train_set, NOT the index values stored in
    # train_split — so the "split" is effectively ignored here.
    # SubsetRandomSampler(train_split) looks like the intent; confirm.
    dataloader = DataLoader(train_set,
                            batch_size=batch_size,
                            num_workers=n_cpu,
                            sampler=RandomSampler(train_split))
def _inverse_scale(scaler, series, n_rows, n_cols):
    """Undo the fitted scaler for one edge's 1-D series.

    The scaler was fitted on ``n_cols`` feature columns, so the series
    (length ``n_rows``) is placed in column 0 of a zero matrix, run through
    ``inverse_transform``, and column 0 is read back as an (n_rows, 1) array.
    """
    buf = np.zeros((n_rows, n_cols))
    buf[:, 0] = np.asarray(series).flatten()
    return scaler.inverse_transform(buf)[:, [0]]


def main(opt):
    """Train a GRNN on traffic data with truncated BPTT.

    Optionally live-plots and saves the learning curve for the single edge
    ``opt.showNum``. Writes a JPG plot and a model checkpoint (.pt) under
    ``result/experiment/`` per window step, then exits the interpreter.

    Parameters:
        opt: options object (taskID, finterval, alpha, truncate, dimHidden,
            nIter, lr, batchSize, cuda, showNum, test, grnnID, manualSeed,
            ...); ``opt.nNode``, ``opt.dimFeature`` and ``opt.interval``
            are filled in here.

    NOTE(review): relies on module-level names `trafficDataLoader`,
    `MinMaxScaler`, `Log`, `timStart`, `GRNN`, `explained_variance_score`
    defined/imported elsewhere in this file.
    """
    dataLoader = trafficDataLoader(opt.taskID, opt.finterval)

    opt.nNode = dataLoader.nNode
    opt.dimFeature = dataLoader.dimFeature
    data = dataLoader.data  # [n, T]

    # scale data using MinMaxScaler (fitted on the [n, T] layout, i.e. the
    # scaler's feature axis has T columns — see _inverse_scale usage below)
    scaler = MinMaxScaler()
    scaler.fit(data)
    scaler_data = scaler.transform(data)

    data = np.transpose(scaler_data)  # [T, n]
    data = data[np.newaxis, :, :, np.newaxis]  # [b=1, T, n, d=1]

    # adjacency: scaled graph edges plus identity self-loops
    A = dataLoader.A
    A = opt.alpha * A + np.eye(opt.nNode)

    if opt.test is not None:
        # test mode: keep only the first opt.test nodes, identity adjacency
        opt.nNode = opt.test
        data = data[:, :, :opt.nNode, :]
        A = np.eye(opt.nNode)

    # convert data and A to tensors; random initial hidden state
    data = torch.from_numpy(data)  # [b, T, n, d]
    A = torch.from_numpy(A[np.newaxis, :, :])  # [b, n, n]
    hState = torch.randn(opt.batchSize, opt.dimHidden,
                         opt.nNode).double()  # [b, D, n]

    opt.interval = data.size(1)
    yLastPred = 0

    # set model configuration
    log = Log(opt, timStart)
    net = GRNN(opt)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)

    net.double()
    print(net)

    if opt.cuda:
        net.cuda()
        criterion.cuda()
        data = data.cuda()
        A = A.cuda()
        hState = hState.cuda()

    # set plot details
    if opt.showNum is not None:
        plt.style.use('ggplot')
        plt.figure(1, figsize=(10, 5))
        plt.xlabel(f'Iteration (data on {opt.finterval}-min interval)'
                   )  # iteration = lenBptt + (# propogations - 1)
        plt.ylabel('Scaled Speed')
        plt.title(f'GRNN{opt.grnnID} + Adam: Edge {opt.showNum}')

        # create static legend
        data_color = '#484D6D'
        learning_color = '#DA5552'
        patchA = mpatches.Patch(color=data_color, label='data')
        patchB = mpatches.Patch(color=learning_color, label='learning curve')
        plt.legend(handles=[patchA, patchB])

    # let cuDNN autotune convolution algorithms for speed
    torch.backends.cudnn.benchmark = True

    # begin training: slide a window of length opt.truncate over time
    for t in range(opt.interval - opt.truncate):
        x = data[:, t:(t + opt.truncate), :, :]  # batch, interval, node, feature
        y = data[:, (t + 1):(t + opt.truncate + 1), :, :]  # next-step targets

        for i in range(opt.nIter):
            O, _ = net(x, hState, A)  # data, hState, A
            hState = hState.data  # detach hidden state from the graph

            loss = criterion(O, y)  # criterion(y_true, y_pred)
            MSE = criterion(O[:, -1, :, :], y[:, -1, :, :])  # last-step error
            optimizer.zero_grad()
            loss.backward()  # compute gradients
            optimizer.step()  # update parameters

            # explained variance on the final time step, all nodes
            variance = explained_variance_score(O[-1, -1, :, -1].data,
                                                y[-1, -1, :, -1].data)

            # log to tensor in matlab file
            # TODO: unscale the results before saving, show timestamp
            log.prediction[:, t, :, :] = O[:, -1, :, :].data
            log.mseLoss[t] = MSE.data
            log.varianceScore[t] = variance

        # show and save log training details
        log.showIterState(t)

        # advance the hidden state one step before sliding the window
        _, hState = net.propogator(x[:, 0, :, :], hState, A)
        hState = hState.data

        # update plot of training model at each window step. Tensor.cpu()
        # is a no-op on CPU tensors, so a single path replaces the
        # original duplicated CUDA/CPU branches.
        if opt.showNum is not None:
            if t == 0:
                # first window: draw the whole truncate-length series
                x = _inverse_scale(
                    scaler,
                    x[0, :, opt.showNum, 0].cpu().data.numpy().flatten(),
                    opt.truncate, data.size(1)).flatten()
                O = _inverse_scale(
                    scaler,
                    O[0, :, opt.showNum, 0].cpu().data.numpy().flatten(),
                    opt.truncate, data.size(1)).flatten()

                plt.plot(list(range(opt.truncate)),
                         x,
                         color=data_color,
                         linestyle='-',
                         linewidth=1.5)
                plt.plot([v + 1 for v in range(opt.truncate)],
                         O,
                         color=learning_color,
                         linestyle='-')
            else:
                # later windows: append only the newest segment
                x = _inverse_scale(
                    scaler,
                    x[0, -2:, opt.showNum, 0].cpu().data.numpy().flatten(),
                    2, data.size(1))
                O = _inverse_scale(
                    scaler,
                    O[0, -1, opt.showNum, 0].cpu().data.numpy().flatten(),
                    1, data.size(1))

                plt.plot([t + opt.truncate - 2, t + opt.truncate - 1],
                         x,
                         color=data_color,
                         linestyle='-',
                         linewidth=1.5)
                # BUG FIX: the original CPU branch called plt.plotkind,
                # which does not exist in matplotlib and raised
                # AttributeError; both branches must use plt.plot.
                plt.plot([t + opt.truncate - 1, t + opt.truncate],
                         [yLastPred, O],
                         color=learning_color,
                         linestyle='-')

            plt.savefig(
                f'result/experiment/grnn{opt.grnnID}-{opt.finterval}int-{opt.taskID}tid-{opt.alpha}a-{opt.truncate}T-{opt.dimHidden}D-{opt.nIter}i-{opt.lr}lr-{opt.manualSeed}ms-{opt.batchSize}b-{opt.showNum}sn.jpg'
            )

            # updates the graph at some interval
            plt.draw()
            plt.pause(0.7)
            yLastPred = O[-1]

        log.saveResult(t)

        # save trained model as pt file
        torch.save(
            {
                'epoch': opt.nIter,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': criterion
            },
            f'result/experiment/grnn{opt.grnnID}-{opt.finterval}int-{opt.taskID}tid-{opt.alpha}a-{opt.truncate}T-{opt.dimHidden}D-{opt.nIter}i-{opt.lr}lr-{opt.manualSeed}ms-{opt.batchSize}b-{opt.showNum}sn.pt'
        )

    if opt.showNum is not None:
        plt.savefig(
            f'result/experiment/grnn{opt.grnnID}-{opt.finterval}int-{opt.interval}int-{opt.taskID}tid-{opt.alpha}a-{opt.truncate}T-{opt.dimHidden}D-{opt.nIter}i-{opt.lr}lr-{opt.manualSeed}ms-{opt.batchSize}b-{opt.showNum}sn.jpg'
        )
        plt.show()

    # terminate app. BUG FIX: the original exited with status 1, which
    # signals failure to the shell even on successful completion.
    exit(0)
Example #4
0
def main(config, home_path):
    """Train and validate a VioNet model selected by ``config.model``.

    Builds the model and its trainable params, the train/val datasets and
    loaders, CSV logs and a tensorboard writer, then runs
    ``config.num_epoch`` epochs, checkpointing whenever validation
    accuracy improves on the running baseline.

    Parameters:
        config: experiment configuration (model name, dataset, batch
            sizes, transforms, optimizer hyper-parameters, ...).
        home_path: root directory for datasets, logs, tensorboard runs
            and checkpoints.

    NOTE(review): relies on module-level names `device` and `g_path`
    defined elsewhere in this file — confirm they are set before calling.
    """
    # load model — each branch yields (model, trainable params);
    # any unrecognized name falls back to densenet_lean
    if config.model == 'c3d':
        model, params = VioNet_C3D(config, home_path)
    elif config.model == 'convlstm':
        model, params = VioNet_ConvLSTM(config)
    elif config.model == 'densenet':
        model, params = VioNet_densenet(config, home_path)
    elif config.model == 'densenet_lean':
        model, params = VioNet_densenet_lean(config, home_path)
    elif config.model == 'resnet50':
        model, params = VioNet_Resnet(config, home_path)
    elif config.model == 'densenet2D':
        model, params = VioNet_Densenet2D(config)
    elif config.model == 'i3d':
        model, params = VioNet_I3D(config)
    elif config.model == 's3d':
        model, params = VioNet_S3D(config)
    else:
        model, params = VioNet_densenet_lean(config, home_path)

    # dataset
    dataset = config.dataset
    # sample_size = config.sample_size
    stride = config.stride
    sample_duration = config.sample_duration

    # cross validation phase
    cv = config.num_cv
    input_mode = config.input_type

    # model-specific input size and normalization
    sample_size, norm = build_transforms_parameters(model_type=config.model)

    # train set: random-scale center crop + horizontal flip augmentation
    crop_method = GroupRandomScaleCenterCrop(size=sample_size)

    # if input_mode == 'rgb':
    #     norm = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    # elif input_mode == 'dynamic-images':
    #     # norm = Normalize([0.49778724, 0.49780366, 0.49776983], [0.09050678, 0.09017131, 0.0898702 ])
    #     norm = Normalize([38.756858/255, 3.88248729/255, 40.02898126/255], [110.6366688/255, 103.16065604/255, 96.29023126/255])
    # else:
    #     norm = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    train_temporal_transform = build_temporal_transformation(
        config, config.train_temporal_transform)
    spatial_transform = Compose(
        [crop_method,
         GroupRandomHorizontalFlip(),
         ToTensor(), norm])
    target_transform = Label()

    train_batch = config.train_batch
    # RWF uses a fixed single-fold annotation file; other datasets are
    # addressed per cross-validation fold `cv`
    if dataset == RWF_DATASET:
        # train_data = VioDB(g_path + '/VioDB/{}_jpg/frames/'.format(dataset),
        #                 g_path + '/VioDB/{}_jpg{}.json'.format(dataset, cv), 'training',
        #                 spatial_transform, temporal_transform, target_transform, dataset,
        #                 tmp_annotation_path=os.path.join(g_path, config.temp_annotation_path))
        train_data = VioDB(
            os.path.join(home_path, RWF_DATASET.upper(), 'frames/'),
            os.path.join(home_path, VIO_DB_DATASETS, "rwf-2000_jpg1.json"),
            'training',
            spatial_transform,
            train_temporal_transform,
            target_transform,
            dataset,
            tmp_annotation_path=os.path.join(g_path,
                                             config.temp_annotation_path),
            input_type=config.input_type)
    else:
        train_data = VioDB(
            os.path.join(home_path, VIO_DB_DATASETS, dataset +
                         '_jpg'),  #g_path + '/VioDB/{}_jpg/'.format(dataset),
            os.path.join(home_path, VIO_DB_DATASETS, '{}_jpg{}.json'.format(
                dataset,
                cv)),  #g_path + '/VioDB/{}_jpg{}.json'.format(dataset, cv),
            'training',
            spatial_transform,
            train_temporal_transform,
            target_transform,
            dataset,
            tmp_annotation_path=os.path.join(g_path,
                                             config.temp_annotation_path),
            input_type=config.input_type)
    train_loader = DataLoader(train_data,
                              batch_size=train_batch,
                              shuffle=True,
                              num_workers=0,
                              pin_memory=True)

    # val set: deterministic center crop, no flip augmentation
    crop_method = GroupScaleCenterCrop(size=sample_size)
    # if input_mode == 'rgb':
    #     norm = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    # elif input_mode == 'dynamic-images':
    #     norm = Normalize([0.49778724, 0.49780366, 0.49776983], [0.09050678, 0.09017131, 0.0898702 ])

    val_temporal_transform = build_temporal_transformation(
        config, config.val_temporal_transform)
    spatial_transform = Compose([crop_method, ToTensor(), norm])
    target_transform = Label()

    val_batch = config.val_batch

    if dataset == RWF_DATASET:
        # val_data = VioDB(g_path + '/VioDB/{}_jpg/frames/'.format(dataset),
        #                 g_path + '/VioDB/{}_jpg{}.json'.format(dataset, cv), 'validation',
        #                 spatial_transform, temporal_transform, target_transform, dataset,
        #                 tmp_annotation_path=os.path.join(g_path, config.temp_annotation_path))
        val_data = VioDB(
            os.path.join(home_path, RWF_DATASET.upper(), 'frames/'),
            os.path.join(home_path, VIO_DB_DATASETS, "rwf-2000_jpg1.json"),
            'validation',
            spatial_transform,
            val_temporal_transform,
            target_transform,
            dataset,
            tmp_annotation_path=os.path.join(g_path,
                                             config.temp_annotation_path),
            input_type=config.input_type)
    else:
        val_data = VioDB(
            os.path.join(home_path, VIO_DB_DATASETS, dataset +
                         '_jpg'),  #g_path + '/VioDB/{}_jpg/'.format(dataset),
            os.path.join(home_path, VIO_DB_DATASETS, '{}_jpg{}.json'.format(
                dataset,
                cv)),  #g_path + '/VioDB/{}_jpg{}.json'.format(dataset, cv),
            'validation',
            spatial_transform,
            val_temporal_transform,
            target_transform,
            dataset,
            tmp_annotation_path=os.path.join(g_path,
                                             config.temp_annotation_path),
            input_type=config.input_type)
    val_loader = DataLoader(val_data,
                            batch_size=val_batch,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)

    # experiment name shared by log dir and tensorboard dir
    template = '{}_fps{}_{}_split({})_input({})_TmpTransform({})_Info({})'.format(
        config.model, sample_duration, dataset, cv, input_mode,
        config.train_temporal_transform, config.additional_info)
    log_path = os.path.join(home_path, PATH_LOG, template)
    # chk_path = os.path.join(PATH_CHECKPOINT, template)
    tsb_path = os.path.join(home_path, PATH_TENSORBOARD, template)

    # create output dirs if missing (parents assumed to exist)
    for pth in [log_path, tsb_path]:
        if not os.path.exists(pth):
            os.mkdir(pth)

    print('tensorboard dir:', tsb_path)
    writer = SummaryWriter(tsb_path)

    # CSV logs: per-batch, per-epoch, per-validation, and combined
    batch_log = Log(log_path + '/batch_log.csv',
                    ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])
    epoch_log = Log(log_path + '/epoch_log.csv',
                    ['epoch', 'loss', 'acc', 'lr'])
    val_log = Log(log_path + '/val_log.csv', ['epoch', 'loss', 'acc'])
    train_val_log = Log(
        log_path + '/train_val_LOG.csv',
        ['epoch', 'train_loss', 'train_acc', 'lr', 'val_loss', 'val_acc'])

    # prepare loss, SGD optimizer and plateau LR scheduler
    criterion = nn.CrossEntropyLoss().to(device)
    learning_rate = config.learning_rate
    momentum = config.momentum
    weight_decay = config.weight_decay

    optimizer = torch.optim.SGD(params=params,
                                lr=learning_rate,
                                momentum=momentum,
                                weight_decay=weight_decay)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, verbose=True, factor=config.factor, min_lr=config.min_lr)

    # running best-so-far used as the checkpointing criterion
    acc_baseline = config.acc_baseline
    loss_baseline = 1

    for i in range(config.num_epoch):
        train_loss, train_acc, lr = train(i, train_loader, model, criterion,
                                          optimizer, device, batch_log,
                                          epoch_log)
        val_loss, val_acc = val(i, val_loader, model, criterion, device,
                                val_log)
        epoch = i + 1
        train_val_log.log({
            'epoch': epoch,
            'train_loss': train_loss,
            'train_acc': train_acc,
            'lr': lr,
            'val_loss': val_loss,
            'val_acc': val_acc
        })
        writer.add_scalar('training loss', train_loss, epoch)
        writer.add_scalar('training accuracy', train_acc, epoch)
        writer.add_scalar('validation loss', val_loss, epoch)
        writer.add_scalar('validation accuracy', val_acc, epoch)

        # plateau scheduler steps on validation loss
        scheduler.step(val_loss)
        # checkpoint on strictly better accuracy, or equal accuracy with
        # strictly lower loss; then advance the baselines
        if val_acc > acc_baseline or (val_acc >= acc_baseline
                                      and val_loss < loss_baseline):
            torch.save(
                model.state_dict(),
                os.path.join(
                    home_path, PATH_CHECKPOINT,
                    '{}_fps{}_{}{}_{}_{:.4f}_{:.6f}.pth'.format(
                        config.model, sample_duration, dataset, cv, epoch,
                        val_acc, val_loss)))
            acc_baseline = val_acc
            loss_baseline = val_loss