Example #1
def delay_20(agi):
    util.metric(agi, 'delay-20')
    agi.appexec('wait', 5)
    util.say(agi, 'please-hold')
    agi.appexec('wait', 1)
    util.say(agi, 'for-the-next-available-outgoing-line')
    agi.appexec('wait', 3)
    agi.appexec('MusicOnHold', ',6')
    agi.appexec('wait', 1)
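
A minimal sketch of the util.metric helper these dialplan handlers call, assuming a pyst-style AGI object with a verbose() method; the body below is an assumption, since the real implementation is not shown in the example:

def metric(agi, name):
    # Assumed implementation: log a named counter event so calls
    # through each dialplan branch can be tallied later.
    agi.verbose('metric %s' % name, level=1)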
Example #2
def yt_scan(bot, url):
    if bot.config.get("module: open_graph", "youtube") == "off":
        return None

    url = re.sub("http(s)?://", "", url)
    regex = "^(?:www\\.)?(?:(?:youtube\\.com/(?:watch)?(?:[?&][a-z]+=[a-z_]+)?(?:[?&]v=))|(?:youtu\\.be\\/))([a-zA-Z0-9-_]+)"
    apiurl = "http://gdata.youtube.com/feeds/api/videos/%s?v=2&alt=jsonc" % re.findall(regex, url)[0]
    bot._debug("API URL: %s" % apiurl)
    result = urllib2.urlopen(apiurl)
    jsondata = json.load(result)

    title = jsondata['data']['title']
    author = jsondata['data']['uploader']
    length = util.time_metric(secs=jsondata['data']['duration'])
    likes = util.metric(int(jsondata['data']['likeCount']))
    dislikes = util.metric(int(jsondata['data']['ratingCount']) - int(jsondata['data']['likeCount']))
    views = util.metric(int(jsondata['data']['viewCount']))

    fmt = "\x02You\x034Tube\x0f: \x02%s\x0f by %s [%s] [\x033+%s\x0f \x035-%s\x0f] [%sv]" % (title, author, length, likes, dislikes, views)
    util.answer(bot, fmt.encode('utf-8'))
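
Here util.metric is used differently: it formats raw like/view counts for display. A plausible sketch, assuming it abbreviates large integers (the helper's real body is not shown):

def metric(n):
    # Hypothetical formatter: 1234 -> '1.2k', 2500000 -> '2.5M'.
    for factor, suffix in ((10 ** 9, 'G'), (10 ** 6, 'M'), (10 ** 3, 'k')):
        if n >= factor:
            return '%.1f%s' % (n / float(factor), suffix)
    return str(n)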
Example #3
def train(train_loader):
    epoch_loss = []
    train_pred = []
    train_label = []
    for i, (x, y) in enumerate(train_loader, 0):
        x = x.cuda()
        y = y.cuda()
        optimizer.zero_grad()
        normalization_label = y[:, :, :, 0].cpu().detach().numpy()

        # STGCN
        # out = net(A_wave, x)
        out = net(x)

        pred = util.re_normalization(out, mean[0], std[0])
        label = util.re_normalization(normalization_label, mean[0], std[0])

        loss = criterion(out, y[:, :, :, 0])
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.detach().cpu().numpy())

        train_pred.append(pred.cpu().detach().numpy())
        train_label.append(label)

        if (i + 1) % 20 == 0:
            pred = np.concatenate(train_pred, axis=0)
            label = np.concatenate(train_label, axis=0)
            train_mae, train_rmse, train_mape, b = util.metric(pred, label)
            print("[epoch %d][%d/%d] loss: %.4f mae: %.4f rmse: %.4f " %
                  (epoch, i + 1, len(train_loader), loss.item(), train_mae,
                   train_rmse))

    train_pred = np.concatenate(train_pred, axis=0)
    train_label = np.concatenate(train_label, axis=0)
    train_mae, train_rmse, train_mape, b = util.metric(train_pred, train_label)

    return train_rmse, sum(epoch_loss)
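
In this example util.metric unpacks to (MAE, RMSE, MAPE, ...) over aligned prediction/label arrays. A minimal NumPy sketch under that assumption; the unused fourth return value is left as None:

import numpy as np

def metric(pred, label, eps=1e-5):
    # Assumed implementation: mean absolute error, root mean squared
    # error, and mean absolute percentage error.
    mae = np.mean(np.abs(pred - label))
    rmse = np.sqrt(np.mean((pred - label) ** 2))
    mape = np.mean(np.abs(pred - label) / (np.abs(label) + eps))
    return mae, rmse, mape, None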
Example #4
def eval(model, data_eval, voc_size, epoch):
    eval_len = len(data_eval)
    # evaluate
    print('')
    model.eval()
    y_pred_prob = np.zeros((eval_len, voc_size[-1]))
    y_gt = y_pred_prob.copy()
    y_pred = y_pred_prob.copy()

    for step, input in enumerate(data_eval):
        pre_outputs, pre_labels, last_outputs, last_labels = model(input)
        last_outputs = F.softmax(last_outputs, dim=-1)
        last_v, last_arg = torch.max(last_outputs, dim=-1)
        last_v = last_v.detach().cpu().numpy()
        last_arg = last_arg.detach().cpu().numpy()

        def filter_other_token(x):
            if x[1] >= voc_size[-1]:
                return False
            return True

        try:
            last_v, last_arg = zip(
                *filter(filter_other_token, zip(last_v, last_arg)))
        except Exception:
            last_v, last_arg = [], []

        last_v, last_arg = list(last_v), list(last_arg)
        target = last_labels.detach().cpu().numpy()[:-1]  # remove end token

        pred_prob = np.zeros(voc_size[-1])
        pred_prob[last_arg] = last_v
        pred = pred_prob.copy()
        pred[last_arg] = 1
        y_pred[step, :] = pred
        y_pred_prob[step, :] = pred_prob
        y_gt[step, target] = 1

        llprint('\rEval--Epoch: %d, Step: %d/%d' %
                (epoch, step, len(data_eval)))

    js, auc, p_1, p_3, p_5, f1, auprc = metric(y_gt, y_pred, y_pred_prob)
    llprint(
        '\tJS: %.4f, AUC: %.4f, P1: %.4f, P3: %.4f, P5: %.4f, F1: %.4f, AUPRC: %.4f\n'
        % (js, auc, p_1, p_3, p_5, f1, auprc))
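
llprint writes the '\r'-prefixed progress line in place. Its definition is not shown here; a common implementation it likely wraps:

import sys

def llprint(message):
    # Write without a trailing newline so '\r' lines overwrite in place.
    sys.stdout.write(message)
    sys.stdout.flush()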
Example #5
def vmauthenticate(agi):
    """Authenticate a voice mailbox and continue, or busy."""
    util.metric(agi, 'friction-vmauthenticate')
    # Note vmauthenticate lets user jump to 'a' extension if existing,
    # so don't call this in a context that defines that!
    try:
        util.say(agi, 'authenticate-with-your-voice-mail-box-to-continue')
        res = agi.appexec('VMAuthenticate')
    except Exception as exc:
        # we expect AGIAppError('Error executing application, or hangup',)
        util.metric(agi, 'friction-vmauthenticate-deny')
        agi.appexec('busy')
        # above command should not exit
    else:
        util.metric(agi, 'friction-vmauthenticate-allow')
Example #7
def delay_10(agi):
    util.metric(agi, 'delay-10')
    agi.appexec('wait', 5)
    agi.appexec('MusicOnHold', ',5')
Example #8
def context_restricted_dialtone(agi):
    util.metric(agi, 'friction-context-restricted-dialtone')
    agi.set_context('restricted-outgoing-dialtone-wrapper')
    agi.set_extension('s')
    agi.set_priority(1)
Example #9
def main():
    device = torch.device(args.device)

    _, _, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    model = gwnet(device,
                  args.num_nodes,
                  args.dropout,
                  supports=supports,
                  gcn_bool=args.gcn_bool,
                  addaptadj=args.addaptadj,
                  aptinit=adjinit)
    model.to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()

    print('model load successfully')

    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = scaler.inverse_transform(realy[:, :, i])
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))

    if args.plotheatmap == "True":
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)),
                        dim=1)
        adp = adp.cpu().detach().numpy()
        adp = adp * (1 / np.max(adp))
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb" + '.pdf')

    y12 = scaler.inverse_transform(realy[:, 99, 11]).cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:, 99, 11]).cpu().detach().numpy()

    y3 = scaler.inverse_transform(realy[:, 99, 2]).cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:, 99, 2]).cpu().detach().numpy()

    df2 = pd.DataFrame({
        'real12': y12,
        'pred12': yhat12,
        'real3': y3,
        'pred3': yhat3
    })
    df2.to_csv('./wave.csv', index=False)
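
scaler.inverse_transform above undoes z-score normalization before metrics are computed. A minimal scaler consistent with how it is called in these examples (assumed; the dataloader's actual scaler class is not shown):

class StandardScaler:
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        # Works elementwise on NumPy arrays and torch tensors alike.
        return (data * self.std) + self.mean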
Example #10
def main():
    # set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    # load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #    lr = max(0.000002, args.learning_rate * (0.1 ** (i // 10)))
        #    for g in engine.optimizer.param_groups:
        #        g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        # validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "_epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    # testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    torch.save(
        engine.model.state_dict(), args.save + "_exp" + str(args.expid) +
        "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
Example #11
def main():
    device = torch.device(args.device)

    _, _, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    eR_seq_size = 24  # 24
    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix_train)
    scaler = dataloader['scaler']

    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix,
                                   scaler=scaler)

    blocks = int(dataloader[f'x_test{args.suffix}'].shape[1] /
                 3)  # Every block reduce the input sequence size by 3.
    print(f'blocks = {blocks}')

    if args.eRec:
        error_size = 6
        model = eRGwnet(device,
                        args.num_nodes,
                        args.dropout,
                        supports=supports,
                        gcn_bool=args.gcn_bool,
                        addaptadj=args.addaptadj,
                        adjinit=adjinit,
                        in_dim=args.in_dim,
                        out_dim=args.seq_length,
                        residual_channels=args.nhid,
                        dilation_channels=args.nhid,
                        skip_channels=args.nhid * 8,
                        end_channels=args.nhid * 16,
                        blocks=blocks,
                        error_size=error_size)
    else:
        model = gwnet(device,
                      args.num_nodes,
                      args.dropout,
                      supports=supports,
                      gcn_bool=args.gcn_bool,
                      addaptadj=args.addaptadj,
                      adjinit=adjinit,
                      in_dim=args.in_dim,
                      out_dim=args.seq_length,
                      residual_channels=args.nhid,
                      dilation_channels=args.nhid,
                      skip_channels=args.nhid * 8,
                      end_channels=args.nhid * 16,
                      blocks=blocks)
    model.to(device)
    model.load_state_dict(
        torch.load(args.checkpoint, map_location=torch.device(device)))
    model.eval()

    print('model load successfully')
    outputs = []
    realy = torch.Tensor(dataloader[f'y_test{args.suffix}']).to(device)
    if args.eRec:
        realy = realy.transpose(0, 1)[-1, :, :, :, :]
    realy = realy.transpose(1, 3)[:, 0, :, :]
    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testy = torch.Tensor(y).to(device)
        if args.eRec:
            testx = testx.transpose(0, 1)
            testy = testy.transpose(0, 1)
        testx = testx.transpose(-3, -1)
        testy = testy.transpose(-3, -1)
        # print(f'testx.shape = {testx.shape}')
        with torch.no_grad():
            if args.eRec:
                preds = model(testx, testy[:, :, 0:1, :, :],
                              scaler).transpose(1, 3)
            else:
                preds = model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    print(f'yhat before shape = {yhat.shape}')
    yhat = yhat[:realy.size(0), ...]
    print(f'yhat shape = {yhat.shape}')

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))

    mse = nn.MSELoss(reduction='none')
    mae = nn.L1Loss(reduction='none')
    pred = scaler.inverse_transform(yhat)
    loss_mse = mse(pred, realy).transpose(1, 2)
    loss_mae = mae(pred, realy).transpose(1, 2)
    print(f'loss_mae shape = {loss_mae.shape}')
    if not args.eRec:
        model_name = f'gwnet{args.suffix}'
    else:
        model_name = f'eRgwnet{args.suffix}'
    print_results(model_name, loss_mse, loss_mae)
    plot_results(model_name, loss_mae, detector=1)

    if args.plotheatmap == "True":
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)),
                        dim=1)
        adp = adp.cpu().detach().numpy()
        adp = adp * (1 / np.max(adp))
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb" + '.pdf')

    y12 = realy[:, -1, -1].cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:, -1, -1]).cpu().detach().numpy()

    y3 = realy[:, -1, 2].cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:, -1, 2]).cpu().detach().numpy()

    df2 = pd.DataFrame({
        'real12': y12,
        'pred12': yhat12,
        'real3': y3,
        'pred3': yhat3
    })
    df2.to_csv('./wave.csv', index=False)
Example #12
def main():
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data

    frequencies = np.array([
        8.176, 8.662, 9.177, 9.723, 10.301, 10.913, 11.562, 12.250, 12.978,
        13.750, 14.568, 15.434, 16.352, 17.324, 18.354, 19.445, 20.601, 21.826,
        23.124, 24.499, 25.956, 27.500, 29.135, 30.867, 32.703, 34.648, 36.708,
        38.890, 41.203, 43.653, 46.249, 48.999, 51.913, 55.000, 58.270, 61.735,
        65.406, 69.295, 73.416, 77.781, 82.406, 87.307, 92.499, 97.998, 103.82,
        110.00, 116.54, 123.47, 130.81, 138.59, 146.83, 155.56, 164.81, 174.61,
        184.99, 195.99, 207.65, 220.00, 233.08, 246.94, 261.63, 277.18, 293.66,
        311.13, 329.63, 349.23, 369.99, 391.99, 415.31, 440.00, 466.16, 493.88,
        523.25, 554.37, 587.33, 622.25, 659.26, 698.46, 739.99, 783.99, 830.61,
        880.00, 932.32, 987.77, 1046.5, 1108.7, 1174.7, 1244.5, 1318.5, 1396.9,
        1480.0, 1568.0, 1661.2, 1760.0, 1864.7, 1975.5, 2093.0, 2217.5, 2349.3,
        2489.0, 2637.0, 2793.8, 2960.0, 3136.0, 3322.4, 3520.0, 3729.3, 3951.1,
        4186.0, 4434.9, 4698.6, 4978.0, 5274.0, 5587.7, 5919.9, 6271.9, 6644.9,
        7040.0, 7458.6, 7902.1, 8372.0, 8869.8, 9397.3, 9956.1, 10548.1,
        11175.3, 11839.8, 12543.9
    ])
    piano_adj = np.zeros((128, 128))
    for row in range(128):
        piano_adj[row] = frequencies - frequencies[row]
    print(piano_adj[10:20, 10:20])

    device = torch.device(args.device)
    adj_mx = util.load_piano_adj(piano_adj, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(type(adj_mx))
    print(len(adj_mx))
    for elem in adj_mx:
        print(type(elem))
        print(elem[10:20, 10:20])
        print(elem.shape)
    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    # print(adjinit)
    # # print(supports[0])
    # print("sfdssg" + 234)

    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
        #for g in engine.optimizer.param_groups:
        #g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "_epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    torch.save(
        engine.model.state_dict(), args.save + "_exp" + str(args.expid) +
        "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
Example #13
                pred = util.re_normalization(out, mean[0], std[0])
                label = util.re_normalization(normalization_label, mean[0],
                                              std[0])

                loss = criterion(out, y[:, :, :, 0])
                val_losses.append(loss.detach().cpu().numpy())

                val_pred.append(pred.cpu().detach().numpy())
                val_label.append(label)
            val_loss = sum(val_losses)
            validation_losses.append(val_loss)
            val_pred_ = np.concatenate(val_pred, axis=0)
            val_label_ = np.concatenate(val_label, axis=0)

            val_mae, val_rmse, val_mape, b = util.metric(val_pred_, val_label_)

            validation_rmse_set.append(val_rmse)
            validation_MAE_set.append(val_mae)
            end = process_time()
            running_time_set.append(end - start)

            if val_rmse < best_val_loss:
                stop = 0
                best_val_loss = val_rmse
                params_filename = osp.join(args.save, 'params')
                torch.save(
                    {
                        'epoch': epoch,
                        'model_state_dict': net.state_dict(),
                        'opt_state_dict': optimizer.state_dict(),
Example #14
def main():
    #set seed
    #torch.manual_seed(args.seed)
    #np.random.seed(args.seed)
    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    #scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)
    if args.model == 'gwnet':
        engine = trainer1(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)
    elif args.model == 'ASTGCN_Recent':
        engine = trainer2(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)
    elif args.model == 'GRCN':
        engine = trainer3(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)
    elif args.model == 'Gated_STGCN':
        engine = trainer4(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)
    elif args.model == 'H_GCN_wh':
        engine = trainer5(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)

    elif args.model == 'OGCRNN':
        engine = trainer8(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)
    elif args.model == 'OTSGGCN':
        engine = trainer9(args.in_dim, args.seq_length, args.num_nodes,
                          args.nhid, args.dropout, args.learning_rate,
                          args.weight_decay, device, supports, args.decay)
    elif args.model == 'LSTM':
        engine = trainer10(args.in_dim, args.seq_length, args.num_nodes,
                           args.nhid, args.dropout, args.learning_rate,
                           args.weight_decay, device, supports, args.decay)
    elif args.model == 'GRU':
        engine = trainer11(args.in_dim, args.seq_length, args.num_nodes,
                           args.nhid, args.dropout, args.learning_rate,
                           args.weight_decay, device, supports, args.decay)

    # check parameters file
    params_path = args.save + "/" + args.model
    if os.path.exists(params_path) and not args.force:
        raise SystemExit(
            "Params folder exists! Select a new params path please!")
    else:
        if os.path.exists(params_path):
            shutil.rmtree(params_path)
        os.makedirs(params_path)
        print('Create params directory %s' % (params_path))

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        #if i % 10 == 0:
        #lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
        #for g in engine.optimizer.param_groups:
        #g['lr'] = lr
        train_loss = []
        train_mae = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mae.append(metrics[1])
            train_mape.append(metrics[2])
            train_rmse.append(metrics[3])
            #if iter % args.print_every == 0 :
            #   log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
            #  print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1]),flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mae = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()

        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mae.append(metrics[1])
            valid_mape.append(metrics[2])
            valid_rmse.append(metrics[3])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mae = np.mean(train_mae)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mae = np.mean(valid_mae)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAE: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAE: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mae, mtrain_mape, mtrain_rmse,
                         mvalid_loss, mvalid_mae, mvalid_mape, mvalid_rmse,
                         (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), params_path + "/" + args.model +
            "_epoch_" + str(i) + "_" + str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(params_path + "/" + args.model + "_epoch_" +
                   str(bestid + 1) + "_" + str(round(his_loss[bestid], 2)) +
                   ".pth"))
    engine.model.eval()

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        with torch.no_grad():
            preds, spatial_at, parameter_adj = engine.model(testx)
            preds = preds.transpose(1, 3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    prediction = yhat
    for i in range(12):
        pred = prediction[:, :, i]
        #pred = scaler.inverse_transform(yhat[:,:,i])
        #prediction.append(pred)
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    torch.save(
        engine.model.state_dict(), params_path + "/" + args.model + "_exp" +
        str(args.expid) + "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
    prediction_path = params_path + "/" + args.model + "_prediction_results"
    ground_truth = realy.cpu().detach().numpy()
    prediction = prediction.cpu().detach().numpy()
    spatial_at = spatial_at.cpu().detach().numpy()
    parameter_adj = parameter_adj.cpu().detach().numpy()
    np.savez_compressed(os.path.normpath(prediction_path),
                        prediction=prediction,
                        spatial_at=spatial_at,
                        parameter_adj=parameter_adj,
                        ground_truth=ground_truth)
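
np.savez_compressed appends '.npz' to prediction_path, so the saved arrays can be read back as follows (the path below is hypothetical, built from args.save and args.model as in the code above):

import numpy as np

results = np.load('garage/H_GCN_wh_prediction_results.npz')
print(results['prediction'].shape, results['ground_truth'].shape)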
Example #15
def main():
    frequencies = np.array([
        8.176, 8.662, 9.177, 9.723, 10.301, 10.913, 11.562, 12.250, 12.978,
        13.750, 14.568, 15.434, 16.352, 17.324, 18.354, 19.445, 20.601, 21.826,
        23.124, 24.499, 25.956, 27.500, 29.135, 30.867, 32.703, 34.648, 36.708,
        38.890, 41.203, 43.653, 46.249, 48.999, 51.913, 55.000, 58.270, 61.735,
        65.406, 69.295, 73.416, 77.781, 82.406, 87.307, 92.499, 97.998, 103.82,
        110.00, 116.54, 123.47, 130.81, 138.59, 146.83, 155.56, 164.81, 174.61,
        184.99, 195.99, 207.65, 220.00, 233.08, 246.94, 261.63, 277.18, 293.66,
        311.13, 329.63, 349.23, 369.99, 391.99, 415.31, 440.00, 466.16, 493.88,
        523.25, 554.37, 587.33, 622.25, 659.26, 698.46, 739.99, 783.99, 830.61,
        880.00, 932.32, 987.77, 1046.5, 1108.7, 1174.7, 1244.5, 1318.5, 1396.9,
        1480.0, 1568.0, 1661.2, 1760.0, 1864.7, 1975.5, 2093.0, 2217.5, 2349.3,
        2489.0, 2637.0, 2793.8, 2960.0, 3136.0, 3322.4, 3520.0, 3729.3, 3951.1,
        4186.0, 4434.9, 4698.6, 4978.0, 5274.0, 5587.7, 5919.9, 6271.9, 6644.9,
        7040.0, 7458.6, 7902.1, 8372.0, 8869.8, 9397.3, 9956.1, 10548.1,
        11175.3, 11839.8, 12543.9
    ])
    piano_adj = np.zeros((128, 128))
    for row in range(128):
        piano_adj[row] = frequencies - frequencies[row]

    device = torch.device(args.device)
    adj_mx = util.load_piano_adj(piano_adj, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    model = gwnet(device,
                  args.num_nodes,
                  args.dropout,
                  supports=supports,
                  gcn_bool=args.gcn_bool,
                  addaptadj=args.addaptadj,
                  aptinit=adjinit,
                  in_dim=args.in_dim,
                  out_dim=args.seq_length,
                  residual_channels=args.nhid,
                  dilation_channels=args.nhid,
                  skip_channels=args.nhid * 8,
                  end_channels=args.nhid * 16)
    model.to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()

    print('model load successfully')

    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        # print(x.shape)
        testx = testx.transpose(1, 3)
        # print(x.shape)
        with torch.no_grad():
            preds = model(testx).transpose(1, 3)
        # print(preds.shape)
        outputs.append(preds.squeeze())
        break

    # print(len(outputs))
    # print(outputs[0].shape)
    yhat = torch.cat(outputs, dim=0)
    # print(yhat.shape)
    yhat = yhat[:realy.size(0), ...]
    # print(yhat.shape)

    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:, :, i])
        # print(pred.shape)
        # time.sleep(1)
        # print("asdf" + 234)
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))

    if args.plotheatmap == "True":
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)),
                        dim=1)
        adp = adp.cpu().detach().numpy()
        adp = adp * (1 / np.max(adp))
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb" + '.pdf')

    y12 = realy[:, 99, 11].cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:, 99, 11]).cpu().detach().numpy()

    y3 = realy[:, 99, 2].cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:, 99, 2]).cpu().detach().numpy()

    df2 = pd.DataFrame({
        'real12': y12,
        'pred12': yhat12,
        'real3': y3,
        'pred3': yhat3
    })
    df2.to_csv('./wave.csv', index=False)
Example #16
def main():
    #set seed
    args.seed = args.seed if args.seed else \
        np.random.randint(0, np.iinfo("uint32").max, size=1)[-1]
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(args.seed)

    # update run_name & save_dir
    args.run_name += "_".join(
        [args.data_type, str(args.seq_length),
         str(args.seed)])
    args.save += args.run_name + "/"
    os.makedirs(args.save)
    wandb.init(config=args, project=args.project_name, name=args.run_name)

    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,
                                   args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit, args.impute_type)

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in tqdm(range(1, args.epochs + 1)):
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        if i > 1:
            # Skip shuffling for 1st epoch for data imputation
            dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            if i == 1 or engine.imputer.type == "GCN":
                trainx = engine.imputer(x.transpose(1, 3),
                                        engine.model.get_supports())
            else:
                trainx = x.transpose(1, 3)
            trainx = trainx.to(device)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            if i == 1 or engine.imputer.type == "GCN":
                testx = engine.imputer(x.transpose(1, 3),
                                       engine.model.get_supports())
            else:
                testx = x.transpose(1, 3)
            testx = testx.to(device)
            testy = torch.Tensor(y).to(device)
            testy = testy.transpose(1, 3)
            metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
        wandb.log(
            {
                "Train MAE": mtrain_loss,
                "Train MAPE": mtrain_mape,
                "Train RMSE": mtrain_rmse,
                "Validation MAE": mvalid_loss,
                "Validation MAPE": mvalid_mape,
                "Validation RMSE": mvalid_rmse
            },
            step=i)
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(args.save + "epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        with torch.no_grad():
            testx = engine.imputer(x.transpose(1, 3),
                                   engine.model.get_supports())
            testx = testx.to(device)
            preds = engine.model(testx).transpose(1, 3)
        outputs.append(preds.squeeze(1))

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

        wandb.log(
            {
                "Test MAE": metrics[0],
                "Test MAPE": metrics[1],
                "Test RMSE": metrics[2]
            },
            step=i + args.epochs + 1)

    log = 'On average over horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    wandb.log({
        "Avg Test MAE": np.mean(amae),
        "Avg Test MAPE": np.mean(amape),
        "Avg Test RMSE": np.mean(armse)
    })
    torch.save(engine.model.state_dict(),
               args.save + "best_" + str(round(his_loss[bestid], 2)) + ".pth")
Example #17
def main():
    #load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    sensor_ids_cluster, sensor_id_to_ind_cluster, adj_mx_cluster = util.load_adj(
        args.adjdatacluster, args.adjtype)
    dataloader = util.load_dataset_cluster(args.data, args.batch_size,
                                           args.batch_size, args.batch_size)
    #scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    supports_cluster = [torch.tensor(i).to(device) for i in adj_mx_cluster]
    transmit_np = np.float32(np.loadtxt(args.transmit, delimiter=','))
    transmit = torch.tensor(transmit_np).to(device)

    print(args)

    if args.model == 'H_GCN':
        engine = trainer7(args.in_dim, args.in_dim_cluster, args.seq_length,
                          args.num_nodes, args.cluster_nodes, args.nhid,
                          args.dropout, args.learning_rate, args.weight_decay,
                          device, supports, supports_cluster, transmit,
                          args.decay)
    elif args.model == 'H_GCN_wdf':
        engine = trainer6(args.in_dim, args.in_dim_cluster, args.seq_length,
                          args.num_nodes, args.cluster_nodes, args.nhid,
                          args.dropout, args.learning_rate, args.weight_decay,
                          device, supports, supports_cluster, transmit,
                          args.decay)
    # check parameters file
    params_path = args.save + "/" + args.model
    if os.path.exists(params_path) and not args.force:
        raise SystemExit(
            "Params folder exists! Select a new params path please!")
    else:
        if os.path.exists(params_path):
            shutil.rmtree(params_path)
        os.makedirs(params_path)
        print('Create params directory %s' % (params_path))

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        train_loss = []
        train_mae = []
        train_mape = []
        train_rmse = []
        t1 = time.time()

        dataloader['train_loader_cluster'].shuffle()

        for iter, (x, y, x_cluster, y_cluster) in enumerate(
                dataloader['train_loader_cluster'].get_iterator()):

            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            trainx_cluster = torch.Tensor(x_cluster).to(device)
            trainx_cluster = trainx_cluster.transpose(1, 3)
            trainy_cluster = torch.Tensor(y_cluster).to(device)
            trainy_cluster = trainy_cluster.transpose(1, 3)
            metrics = engine.train(trainx, trainx_cluster, trainy[:, 0, :, :],
                                   trainy_cluster)
            train_loss.append(metrics[0])
            train_mae.append(metrics[1])
            train_mape.append(metrics[2])
            train_rmse.append(metrics[3])

        #engine.scheduler.step()
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mae = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()

        for iter, (x, y, x_cluster, y_cluster) in enumerate(
                dataloader['val_loader_cluster'].get_iterator()):
            validx = torch.Tensor(x).to(device)
            validx = validx.transpose(1, 3)
            validy = torch.Tensor(y).to(device)
            validy = validy.transpose(1, 3)
            validx_cluster = torch.Tensor(x_cluster).to(device)
            validx_cluster = validx_cluster.transpose(1, 3)
            validy_cluster = torch.Tensor(y_cluster).to(device)
            validy_cluster = validy_cluster.transpose(1, 3)
            metrics = engine.eval(validx, validx_cluster, validy[:, 0, :, :],
                                  validy_cluster)
            valid_loss.append(metrics[0])
            valid_mae.append(metrics[1])
            valid_mape.append(metrics[2])
            valid_rmse.append(metrics[3])

        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mae = np.mean(train_mae)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mae = np.mean(valid_mae)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAE: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAE: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mae, mtrain_mape, mtrain_rmse,
                         mvalid_loss, mvalid_mae, mvalid_mape, mvalid_rmse,
                         (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), params_path + "/" + args.model +
            "_epoch_" + str(i) + "_" + str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(params_path + "/" + args.model + "_epoch_" +
                   str(bestid + 1) + "_" + str(round(his_loss[bestid], 2)) +
                   ".pth"))
    engine.model.eval()

    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)

    realy = realy.transpose(1, 3)[:, 0, :, :]
    #print(realy.shape)
    for iter, (x, y, x_cluster, y_cluster) in enumerate(
            dataloader['test_loader_cluster'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        testx_cluster = torch.Tensor(x_cluster).to(device)
        testx_cluster = testx_cluster.transpose(1, 3)
        with torch.no_grad():
            preds, _, _ = engine.model(testx, testx_cluster)
            preds = preds.transpose(1, 3)
        outputs.append(preds.squeeze())
    for iter, (x, y, x_cluster, y_cluster) in enumerate(
            dataloader['test_loader_cluster'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1, 3)
        testx_cluster = torch.Tensor(x_cluster).to(device)
        testx_cluster = testx_cluster.transpose(1, 3)
        with torch.no_grad():
            _, spatial_at, parameter_adj = engine.model(testx, testx_cluster)
        break

    yhat = torch.cat(outputs, dim=0)
    yhat = yhat[:realy.size(0), ...]

    #print(yhat.shape)
    print("Training finished")
    print("The valid loss on best model is", str(round(his_loss[bestid], 4)))

    amae = []
    amape = []
    armse = []
    prediction = yhat
    for i in range(12):
        pred = prediction[:, :, i]
        #pred = scaler.inverse_transform(yhat[:,:,i])
        #prediction.append(pred)
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
    torch.save(
        engine.model.state_dict(), params_path + "/" + args.model + "_exp" +
        str(args.expid) + "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
    prediction_path = params_path + "/" + args.model + "_prediction_results"
    ground_truth = realy.cpu().detach().numpy()
    prediction = prediction.cpu().detach().numpy()
    spatial_at = spatial_at.cpu().detach().numpy()
    parameter_adj = parameter_adj.cpu().detach().numpy()
    np.savez_compressed(os.path.normpath(prediction_path),
                        prediction=prediction,
                        spatial_at=spatial_at,
                        parameter_adj=parameter_adj,
                        ground_truth=ground_truth)
Example n. 18
0
def busy(agi):
    util.metric(agi, 'friction-busy')
    agi.appexec('busy')
Example n. 19
0
def delay_5(agi):
    util.metric(agi, 'delay-5')
    agi.appexec('wait', 5)
Example n. 20
0
def delay_10(agi):
    util.metric(agi, 'friction-delay-10')
    agi.appexec('wait', 5)
    agi.appexec('MusicOnHold', ',5')
Example n. 21
0
def delay_5(agi):
    util.metric(agi, 'friction-delay-5')
    agi.appexec('wait', 5)
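In the Asterisk AGI examples in this listing, util.metric(agi, name) records a named call-flow event before the dialplan action runs. A minimal sketch under the assumption that the helper only logs the label through the AGI channel (the real project may also persist counters):

def metric(agi, name):
    # Hypothetical helper: surface the event name on the Asterisk console.
    agi.verbose("metric: %s" % name)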
Example n. 22
0
File: hub.py Project: txanatan/xbot
def gh_linkscan(bot, url):
    try:
        if not re.match("http(s)?://github.com", url):
            bot._debug("I don't know what %s is..." % url)
            return None

        gh = gh_inst(bot)
        if isinstance(gh, basestring):
            bot._debug("Error: %s" % gh)
            return gh

        r_repo = r"^https?://github.com/([A-Za-z0-9-]+)/([A-Za-z0-9-\.]+)"
        r_commit = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/commit/([A-Za-z0-9]+)"
        r_blob = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/blob/([A-Za-z0-9]+)/(.*)"
        r_tree = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/tree/([A-Za-z0-9]+)/(.*)"
        r_issue = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/(issues|pull)/(\d+)"

        r = re.match(r_repo, url)
        if not r:
            return None

        repo = "%s/%s" % r.group(1, 2)
        ghrepo = gh.get_repo(repo)

        bot._debug("Repo: %s" % repo)

        commit = re.match(r_commit, url)
        blob = re.match(r_blob, url)
        tree = re.match(r_tree, url)
        issue = re.match(r_issue, url)

        if commit:
            bot._debug("Commit SHA: %s" % commit.group(1))
            commit = ghrepo.get_commit(commit.group(1))
            fmt = "GitHub: \x02%s\x0f commit \x02%s\x0f: %s [/%s] [\x033+%s\x0f \x035-%s\x0f]" % (repo, commit.sha[:8], commit.commit.message, commit.author.login, util.metric(commit.stats.additions), util.metric(commit.stats.deletions))
            return fmt.encode('utf-8')
        elif blob:
            bot._debug("Blob: [%s] %s" % blob.group(1, 2))
            ref = blob.group(1)
            blob = ghrepo.get_contents(path=blob.group(2), ref=ref)
            fmt = "GitHub: \x02%s\x0f file \x02%s\x0f [%s, branch %s]" % (repo, blob.name, util.metric(blob.size), ref)
            return fmt.encode('utf-8')
        elif tree:
            bot._debug("Tree: [%s] %s" % tree.group(1, 2))
            ref, path = tree.group(1, 2)
            tree = ghrepo.get_dir_contents(path=path, ref=ref)
            fmt = "GitHub: \x02%s\x0f dir \x02%s\x0f [%s files, branch %s]" % (repo, path, util.metric(len(tree)), ref)
            return fmt.encode('utf-8')
        elif issue:
            id = issue.group(2)
            bot._debug("Issue ID: #%s" % id)
            issue = ghrepo.get_issue(int(id))
            assigned_to = issue.assignee.login if issue.assignee else 'no one'
            if issue.state == "open":
                fmt = "GitHub: \x02%s\x0f issue \x02#%s\x0f: %s [by %s, %s assigned, created %s, updated %s]" % (repo, id, issue.title, issue.user.login, assigned_to, util.pretty_date(issue.created_at), util.pretty_date(issue.updated_at))
            else:
                fmt = "GitHub: \x02%s\x0f issue \x02#%s\x0f: %s [by %s, \x035closed\x0f by %s %s]" % (repo, id, issue.title, issue.user.login, issue.closed_by.login, util.pretty_date(issue.closed_at))
            return fmt.encode('utf-8')
        else:
            forks = str(ghrepo.forks)
            watchers = str(ghrepo.watchers)
            fmt = "GitHub: \x02%s\x0f [%sf %sw] last updated %s" % (repo, forks, watchers, util.pretty_date(ghrepo.pushed_at))
            return fmt.encode('utf-8')
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
        raise
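gh_linkscan formats raw counts (commit additions/deletions, blob sizes) with util.metric(n). A plausible sketch, assuming the helper abbreviates large numbers for display (the exact formatting rules are not shown in this listing):

def metric(n):
    # Hypothetical humanizer: 950 -> '950', 1200 -> '1.2k', 3400000 -> '3.4M'.
    for threshold, suffix in ((1000000000, 'G'), (1000000, 'M'), (1000, 'k')):
        if n >= threshold:
            return "%.1f%s" % (n / float(threshold), suffix)
    return str(n)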
Example n. 23
0
def context_restricted_dialtone(agi):
    util.metric(agi, 'friction-context-restricted-dialtone')
    agi.set_context('restricted-outgoing-dialtone-wrapper')
    agi.set_extension('s')
    agi.set_priority(1)
Example n. 24
0
    sm = SMOTE(random_state=1)
    X_train_res, y_train_res = sm.fit_resample(X_train, y_train)

    # Scale the training set
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X_train_res)

    # Fit the data to the model
    model = SVC_Classification()
    model.fit(X_scaled, y_train_res)

    # Save the fitted model
    with open("models/svc_model.pkl", "wb") as m:
        pickle.dump(model, m)

    # Save the fitted scaler
    with open("models/svc_scaler.pkl", "wb") as s:
        pickle.dump(scaler, s)

    with open("models/svc_model.pkl", "rb") as m:
        model = pickle.load(m)

    with open("models/svc_scaler.pkl", "rb") as s:
        scaler = pickle.load(s)

    X_test_scaled = scaler.transform(X_test)
    y_pred = model.predict_label(X_test_scaled)
    precision, recall, accuracy, matrix = metric(y_test, y_pred)
    print(matrix)
    print("Recall: {}".format(round(recall, 2)))
    print("Precision: {}".format(round(precision, 2)))
Example n. 26
0
def main():
    # set seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # load data
    device = torch.device(args.device)
    sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(
        args.adjdata, args.adjtype)
    # suffix = '_filtered_we'  # _filtered_we, _filtered_ew
    eR_seq_size = 24  # 24
    error_size = 6
    dataloader = util.load_dataset(args.data,
                                   args.batch_size,
                                   args.batch_size,
                                   args.batch_size,
                                   eRec=args.eRec,
                                   eR_seq_size=eR_seq_size,
                                   suffix=args.suffix)
    scaler = dataloader['scaler']

    if args.retrain:
        dl_train = util.load_dataset(args.data,
                                     args.batch_size,
                                     args.batch_size,
                                     args.batch_size,
                                     eRec=args.eRec,
                                     eR_seq_size=eR_seq_size,
                                     suffix=args.suffix_train)
        scaler = dl_train['scaler']

    blocks = int(dataloader[f'x_train{args.suffix}'].shape[-3] /
                 3)  # Each block reduces the input sequence length by 3.
    print(f'blocks = {blocks}')

    supports = [torch.tensor(i).to(device) for i in adj_mx]

    print(args)

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler,
                     args.in_dim,
                     args.seq_length,
                     args.num_nodes,
                     args.nhid,
                     args.dropout,
                     args.learning_rate,
                     args.weight_decay,
                     device,
                     supports,
                     args.gcn_bool,
                     args.addaptadj,
                     adjinit,
                     blocks,
                     eRec=args.eRec,
                     retrain=args.retrain,
                     checkpoint=args.checkpoint,
                     error_size=error_size)

    if args.retrain:
        dataloader['val_loader'] = dataloader['train_loader']

    print("start training...", flush=True)
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        # if i % 10 == 0:
        #     lr = max(0.000002, args.learning_rate * (0.1 ** (i // 10)))
        #     for g in engine.optimizer.param_groups:
        #         g['lr'] = lr
        train_loss = []
        train_mape = []
        train_rmse = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainy = torch.Tensor(y).to(device)
            if args.eRec:
                trainx = trainx.transpose(0, 1)
                trainy = trainy.transpose(0, 1)
            trainx = trainx.transpose(-3, -1)
            trainy = trainy.transpose(-3, -1)
            # print(f'trainx.shape = {trainx.shape}')
            # print(f'trainy.shape = {trainy.shape}')
            # print(f'trainy.shape final = {trainy[:,0,:,:].shape}')
            if args.eRec:
                metrics = engine.train(trainx, trainy[:, :, 0, :, :])
            else:
                metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_loss.append(metrics[0])
            train_mape.append(metrics[1])
            train_rmse.append(metrics[2])
            if iter % args.print_every == 0:
                log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
                print(log.format(iter, train_loss[-1], train_mape[-1],
                                 train_rmse[-1]),
                      flush=True)
        t2 = time.time()
        train_time.append(t2 - t1)
        #validation
        valid_loss = []
        valid_mape = []
        valid_rmse = []

        s1 = time.time()
        for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testy = torch.Tensor(y).to(device)
            if args.eRec:
                testx = testx.transpose(0, 1)
                testy = testy.transpose(0, 1)
            testx = testx.transpose(-3, -1)
            testy = testy.transpose(-3, -1)
            if args.eRec:
                metrics = engine.eval(testx, testy[:, :, 0, :, :])
            else:
                metrics = engine.eval(testx, testy[:, 0, :, :])
            valid_loss.append(metrics[0])
            valid_mape.append(metrics[1])
            valid_rmse.append(metrics[2])
        s2 = time.time()
        log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
        print(log.format(i, (s2 - s1)))
        val_time.append(s2 - s1)
        mtrain_loss = np.mean(train_loss)
        mtrain_mape = np.mean(train_mape)
        mtrain_rmse = np.mean(train_rmse)

        mvalid_loss = np.mean(valid_loss)
        mvalid_mape = np.mean(valid_mape)
        mvalid_rmse = np.mean(valid_rmse)
        his_loss.append(mvalid_loss)

        log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
        print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,
                         mvalid_mape, mvalid_rmse, (t2 - t1)),
              flush=True)
        torch.save(
            engine.model.state_dict(), args.save + "_epoch_" + str(i) + "_" +
            str(round(mvalid_loss, 2)) + ".pth")
    print("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    #testing
    bestid = np.argmin(his_loss)  # add 1 to the index to obtain the saved model ID
    engine.model.load_state_dict(
        torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))
    # engine.model.load_state_dict(torch.load(args.save + f"_id_25_2.6_best_model.pth"))
    # engine.model.load_state_dict(torch.load(args.save + f"_exp1_best_2.6.pth"))

    #torch.save(engine.model.state_dict(), args.save + f"_id_{bestid+1}_best_model.pth")
    print(f'best_id = {bestid+1}')

    outputs = []
    realy = torch.Tensor(dataloader[f'y_test{args.suffix}']).to(device)
    #print(f'realy: {realy.shape}')
    if args.eRec:
        realy = realy.transpose(0, 1)
        realy = realy.transpose(-3, -1)[-1, :, 0, :, :]
        #print(f'realy2: {realy.shape}')
    else:
        realy = realy.transpose(-3, -1)[:, 0, :, :]
        #print(f'realy2: {realy.shape}')
    criterion = nn.MSELoss(reduction='none')  # per-element squared error
    criterion2 = nn.L1Loss(reduction='none')  # per-element absolute error
    loss_mse_list = []
    loss_mae_list = []

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testy = torch.Tensor(y).to(device)
        if args.eRec:
            testx = testx.transpose(0, 1)
            testy = testy.transpose(0, 1)
        testx = testx.transpose(-3, -1)
        testy = testy.transpose(-3, -1)
        with torch.no_grad():
            if args.eRec:
                preds = engine.model(testx, testy[:, :, 0:1, :, :],
                                     scaler).transpose(1, 3)
            else:
                preds = engine.model(testx).transpose(1, 3)

        #print(f'preds: {scaler.inverse_transform(torch.squeeze(preds.transpose(-3, -1))).shape}')
        #print(f'testy: {torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)).shape}')
        if args.eRec:
            loss_mse = criterion(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[-1, :, 0:1, :, :].transpose(-3, -1)))
            loss_mae = criterion2(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[-1, :, 0:1, :, :].transpose(-3, -1)))
        else:
            loss_mse = criterion(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)))
            loss_mae = criterion2(
                scaler.inverse_transform(torch.squeeze(preds.transpose(-3,
                                                                       -1))),
                torch.squeeze(testy[:, 0:1, :, :].transpose(-3, -1)))

        loss_mse_list.append(loss_mse)
        loss_mae_list.append(loss_mae)

        outputs.append(preds.squeeze())

    # Drop the last batch, which the loader may have padded to full batch size.
    loss_mse_list.pop(-1)
    loss_mae_list.pop(-1)
    loss_mse = torch.cat(loss_mse_list, 0)
    loss_mae = torch.cat(loss_mae_list, 0)
    #loss_mse = torch.squeeze(loss_mse).cpu()
    #loss_mae = torch.squeeze(loss_mae).cpu()
    loss_mse = loss_mse.cpu()
    loss_mae = loss_mae.cpu()
    print(f'loss_mae: {loss_mae.shape}')
    print(f'loss_mse: {loss_mse.shape}')

    res_folder = 'results/'
    original_stdout = sys.stdout
    with open(res_folder + 'loss_evaluation.txt', 'w') as filehandle:
        sys.stdout = filehandle  # Change the standard output to the file we created.
        count_parameters(engine.model)
        # loss_mae.shape --> (batch_size, seq_size, n_detect)
        print(' 1. ***********')
        print_loss('MSE', loss_mse)
        print(' 2. ***********')
        print_loss('MAE', loss_mae)
        print(' 3. ***********')
        print_loss_sensor('MAE', loss_mae)
        print(' 5. ***********')
        print_loss_seq('MAE', loss_mae)
        print(' 6. ***********')
        print_loss_sensor_seq('MAE', loss_mae)

        sys.stdout = original_stdout  # Reset the standard output to its original value

    with open(res_folder + 'loss_evaluation.txt', 'r') as filehandle:
        print(filehandle.read())

    yhat = torch.cat(outputs, dim=0)
    #print(f'yhat: {yhat.shape}')
    yhat = yhat[:realy.size(0), ...]
    #print(f'yhat2: {yhat.shape}')

    print("Training finished")
    #print("The valid loss on best model is", str(round(his_loss[bestid],4)))

    amae = []
    amape = []
    armse = []
    for i in range(args.seq_length):
        pred = scaler.inverse_transform(yhat[:, :, i])
        real = realy[:, :, i]
        metrics = util.metric(pred, real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over {:d} horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(
        log.format(args.seq_length, np.mean(amae), np.mean(amape),
                   np.mean(armse)))
    torch.save(
        engine.model.state_dict(), args.save + "_exp" + str(args.expid) +
        "_best_" + str(round(np.min(his_loss), 2)) + ".pth")
Example n. 27
0
        for step, input in enumerate(data_eval):
            pred, pred_prob, target = model(input)
            y_pred[step, :] = pred
            y_pred_prob[step, :] = pred_prob
            y_gt[step, :] = target
            llprint('\rEval--Epoch: %d, Step: %d/%d' %
                    (epoch, step, len(data_eval)))

        end_time = time.time()
        elapsed_time = (end_time - start_time) / 60
        llprint(
            '\n\tEpoch: %d, Loss: %.4f, One Epoch Time: %.2fm, Appro Left Time: %.2fm\n'
            % (epoch, np.mean(loss_record), elapsed_time, elapsed_time *
               (EPOCH - epoch - 1)))
        js, auc, p_1, p_3, p_5, f1, auprc = metric(y_gt, y_pred, y_pred_prob)
        llprint(
            '\tJS: %.4f, AUC: %.4f, P1: %.4f, P3: %.4f, P5: %.4f, F1: %.4f, AUPRC: %.4f\n'
            % (js, auc, p_1, p_3, p_5, f1, auprc))

        torch.save(
            model.state_dict(),
            open(
                os.path.join(
                    'saved', model_name, 'Epoch_%d_Loss_%.4f_P1_%.4f.model' %
                    (epoch, np.mean(loss_record), p_1)), 'wb'))
        print('')

    # test
    torch.save(model.state_dict(),
               open(os.path.join('saved', model_name, 'final.model'), 'wb'))
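The llprint used for the in-place progress line above presumably writes without appending a newline, so the leading '\r' can overwrite the previous step. A one-liner sketch under that assumption:

import sys

def llprint(message):
    # Hypothetical line-level printer: write and flush, no newline appended.
    sys.stdout.write(message)
    sys.stdout.flush()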
Example n. 28
0
def main():
    # set seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # load data
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    adj_mx = util.load_adj(adj_path, args.adjtype)
    dataloader = util.load_dataset(outflow_path, args.batch_size,
                                   args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    supports = [torch.tensor(i).to(device) for i in adj_mx]

    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    engine = trainer(scaler, args.in_dim, args.seq_length, num_nodes,
                     args.nhid, args.dropout, args.learning_rate,
                     args.weight_decay, device, supports, args.gcn_bool,
                     args.addaptadj, adjinit)

    logger.write("start training...")
    his_loss = []
    val_time = []
    train_time = []
    for i in range(1, args.epochs + 1):
        # learning rate schedule
        if i % 10 == 0:
            lr = max(0.000002, args.learning_rate * (0.9**(i // 10)))
            for g in engine.optimizer.param_groups:
                g['lr'] = lr

        # train
        train_mae = []
        train_rmse = []
        train_mape = []
        t1 = time.time()
        dataloader['train_loader'].shuffle()
        for iter, (x,
                   y) in enumerate(dataloader['train_loader'].get_iterator()):
            trainx = torch.Tensor(x).to(device)
            trainx = trainx.transpose(1, 3)
            trainy = torch.Tensor(y).to(device)
            trainy = trainy.transpose(1, 3)
            # NOTE: B, T, V, F with F=2, but we only need speed for the label: y[:, 0, ...]
            metrics = engine.train(trainx, trainy[:, 0, :, :])
            train_mae.append(metrics[0])
            train_rmse.append(metrics[1])
            train_mape.append(metrics[2])
        # log results of training set.
        mtrain_mae = np.mean(train_mae)
        mtrain_rmse = np.mean(train_rmse)
        mtrain_mape = np.mean(train_mape) * 100
        train_writer.add_scalar('train/mae', mtrain_mae, i)
        train_writer.add_scalar('train/rmse', mtrain_rmse, i)
        train_writer.add_scalar('train/mape', mtrain_mape, i)

        # validation
        with torch.no_grad():
            valid_mae = []
            valid_mape = []
            valid_rmse = []
            s1 = time.time()
            for _, (x,
                    y) in enumerate(dataloader['val_loader'].get_iterator()):
                testx = torch.Tensor(x).to(device)
                testx = testx.transpose(1, 3)
                testy = torch.Tensor(y).to(device)
                testy = testy.transpose(1, 3)
                metrics = engine.eval(testx, testy[:, 0, :, :])
                valid_mae.append(metrics[0])
                valid_rmse.append(metrics[1])
                valid_mape.append(metrics[2])
            # log results of validation set.
            s2 = time.time()
            val_time.append(s2 - s1)
            mvalid_mae = np.mean(valid_mae)
            mvalid_mape = np.mean(valid_mape) * 100
            mvalid_rmse = np.mean(valid_rmse)
            his_loss.append(mvalid_mae)
            val_writer.add_scalar('val/mae', mvalid_mae, i)
            val_writer.add_scalar('val/rmse', mvalid_rmse, i)
            val_writer.add_scalar('val/mape', mvalid_mape, i)

        t2 = time.time()
        train_time.append(t2 - t1)
        if i % args.print_every == 0:
            logger.write(
                f'Epoch: {i:03d}, MAE: {mtrain_mae:.2f}, RMSE: {mtrain_rmse:.2f}, MAPE: {mtrain_mape:.2f}, Valid MAE: {mvalid_mae:.2f}, RMSE: {mvalid_rmse:.2f}, MAPE: {mvalid_mape:.2f}'
            )
        torch.save(
            engine.model.state_dict(), save_path + "_epoch_" + str(i) + "_" +
            str(round(mvalid_mae, 2)) + ".pth")

    logger.write("Average Training Time: {:.4f} secs/epoch".format(
        np.mean(train_time)))
    # logger.write("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))

    bestid = np.argmin(his_loss)
    engine.model.load_state_dict(
        torch.load(save_path + "_epoch_" + str(bestid + 1) + "_" +
                   str(round(his_loss[bestid], 2)) + ".pth"))

    logger.write("Training finished")
    logger.write(
        f"The valid loss on best model is {str(round(his_loss[bestid],4))}")

    # test
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1, 3)[:, 0, :, :]

    with torch.no_grad():
        t1 = time.time()
        for _, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
            testx = torch.Tensor(x).to(device)
            testx = testx.transpose(1, 3)
            preds = engine.model(testx).transpose(1, 3)
            outputs.append(preds.squeeze())

        t2 = time.time()
        logger.write(f'Inference time: {t2-t1:.4f}')
        yhat = torch.cat(outputs, dim=0)
        yhat = yhat[:realy.size(0), ...]

        # calculate metrics and save predictions
        preds = []
        reals = []
        logger.write('Step i, Test MAE, Test RMSE, Test MAPE')
        for i in range(args.seq_length):
            # prediction of step i
            pred = scaler.inverse_transform(yhat[:, :, i])
            real = realy[:, :, i]
            metrics = util.metric(pred.cpu().detach().numpy(),
                                  real.cpu().detach().numpy())
            logger.write(
                f'{metrics[0]:.2f}, {metrics[1]:.2f}, {metrics[2]*100:.2f}')

            preds.append(pred.tolist())
            reals.append(real.tolist())

    reals = np.array(reals)
    preds = np.array(preds)
    # Save ground truth and predictions under distinct file names.
    np.save(f'test_{args.city}_{args.tinterval}_real.npy', reals)
    np.save(f'test_{args.city}_{args.tinterval}_pred.npy', preds)
    torch.save(
        engine.model.state_dict(), save_path + "_exp" + str(args.expid) +
        "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
Example n. 29
0
def main():
    device = torch.device(args.device)

    _, _, adj_mx = util.load_adj(args.adjdata, args.adjtype)
    supports = [torch.tensor(i).to(device) for i in adj_mx]
    if args.randomadj:
        adjinit = None
    else:
        adjinit = supports[0]

    if args.aptonly:
        supports = None

    model = gwnet(device, args.num_nodes, args.dropout, supports=supports, gcn_bool=args.gcn_bool, addaptadj=args.addaptadj, aptinit=adjinit)
    model.to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()


    print('model load successfully')

    dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
    scaler = dataloader['scaler']
    outputs = []
    realy = torch.Tensor(dataloader['y_test']).to(device)
    realy = realy.transpose(1,3)[:,0,:,:]

    for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
        testx = torch.Tensor(x).to(device)
        testx = testx.transpose(1,3)
        with torch.no_grad():
            preds = model(testx).transpose(1,3)
        outputs.append(preds.squeeze())

    yhat = torch.cat(outputs,dim=0)
    yhat = yhat[:realy.size(0),...]

    
    amae = []
    amape = []
    armse = []
    for i in range(12):
        pred = scaler.inverse_transform(yhat[:,:,i])
        real = realy[:,:,i]
        metrics = util.metric(pred,real)
        log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
        print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
        amae.append(metrics[0])
        amape.append(metrics[1])
        armse.append(metrics[2])

    log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
    print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))

    idx_list = list(range(1, 13)) + ['average']
    amae.append(np.mean(amae))
    amape.append(np.mean(amape))
    armse.append(np.mean(armse))
    df = pd.DataFrame(list(zip(idx_list, amae, amape, armse)), columns=['horizon', 'MAE', 'MAPE', 'RMSE'])

    if args.savehorizon == 'True':
        excel_dir = 'result.xlsx'
        sheet_name= args.sheetname

        if os.path.isfile(excel_dir):  # append a new sheet to the existing workbook
            with pd.ExcelWriter(excel_dir, engine='openpyxl', mode='a') as writer:
                df.to_excel(writer, sheet_name=sheet_name, index=False)
        else:
            df.to_excel(excel_dir, sheet_name=sheet_name, index=False)


    if args.plotheatmap == "True":
        adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)), dim=1)
        device = torch.device('cpu')
        adp.to(device)
        adp = adp.cpu().detach().numpy()
        adp = adp*(1/np.max(adp))
        df = pd.DataFrame(adp)
        sns.heatmap(df, cmap="RdYlBu")
        plt.savefig("./emb"+ '.pdf')

    # print(realy.shape) #torch.Size([6850, 207, 12]) (:, #node, window)

    y12 = realy[:,99,11].cpu().detach().numpy()
    yhat12 = scaler.inverse_transform(yhat[:,99,11]).cpu().detach().numpy()

    y3 = realy[:,99,2].cpu().detach().numpy()
    yhat3 = scaler.inverse_transform(yhat[:,99,2]).cpu().detach().numpy()

    df2 = pd.DataFrame({'real12':y12,'pred12':yhat12, 'real3': y3, 'pred3':yhat3})
    df2.to_csv('./wave.csv',index=False)
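As a usage note (not part of the original script), the wave.csv written above can be inspected with a few lines of pandas; the column names follow the DataFrame built just before saving:

import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('./wave.csv')
# Compare ground truth and prediction for node 99 at horizon 12.
df[['real12', 'pred12']].iloc[:500].plot()
plt.ylabel('value')
plt.savefig('./wave_horizon12.pdf')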
Example n. 30
0
            model_type = 'XGBoost'

        if train_on_subset_data:
            r2, _ = train_on_different_classes(subset_type, equal=False)
            r2.insert(0, model_type)
            csv_writer.writerow(r2)

        if train_on_whole_data:
            x_train, x_test, y_train, y_test = transform_data(
                x_data=train_data, y_data=label, test_size=0.1, random_state=4)
            kappa_model = KappaModel(x_train, x_test, y_train, y_test)
            kappa_model.train_model(model, epochs=epochs)
            predict_train = kappa_model.predict(model, 'train')
            predict_test = kappa_model.predict(model, 'test')

            r2_train, mae_log_train, rmse_train = metric(
                y_train, predict_train)
            r2_test, mae_log_test, rmse_test = metric(y_cal=y_test,
                                                      y_pred=predict_test)
            MAEs_train = mae(np.exp(y_train), np.exp(predict_train))
            MAEs_test = mae(np.exp(y_test), np.exp(predict_test))
            print(mae_log_test, r2_test)

            metric_list = [
                MAEs_train, MAEs_test, r2_train, r2_test, mae_log_train,
                mae_log_test, rmse_train, rmse_test
            ]
            metric_matrix.append(metric_list)

    if train_on_whole_data:
        metric_matrix.insert(0, [
            'MAEs of train data', 'MAEs of test data', 'R2 of train data',