Example #1
def main():
    import argparse
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('--logdir', type=str, default='result')
    parser.add_argument('-m', '--mute',
                        action='store_true', default=False)
    args = parser.parse_args()
    import train
    train.logdir = args.logdir
    train.init(load_model=False)
    # result_dir and plot_summary are expected at module level
    plot_summary(os.path.join(result_dir, train.logdir), train.summary, args.mute)
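A typical invocation, assuming the file is saved as plot_results.py alongside train.py (the filename is an assumption): python plot_results.py --logdir result -m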
Example #2
def pca_components(device):
    metric_values = []
    components_grid = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    for n_components in components_grid:
        params = settings['SWaT'].copy()
        params['num_signals'] = n_components
        train_params, dset = init(params, device)
        train(**train_params,
              visualize_generation=False,
              evaluate_model=False,
              save_model=False)

        dset = params['data'](params['normal_data_path'],
                              params['abnormal_data_path'],
                              normal_label=params['normal_label'],
                              abnormal_label=params['abnormal_label'],
                              seq_length=params['seq_length'],
                              seq_step=params['seq_step'],
                              num_signals=params['num_signals'])
        evaluate_loader = torch.utils.data.DataLoader(
            dset.all_data, batch_size=params['batch_size'], shuffle=True)

        metric_values.append(
            evaluate_anomaly_detection(
                evaluate_loader, train_params['generator'],
                train_params['discriminator'], torch.optim.RMSprop,
                covariance_similarity, 1e-3, 100, params['latent_dim'],
                params['lambda'], params['tau'], params['normal_label'],
                device))

    metric_values = pd.DataFrame(metric_values,
                                 columns=['precision', 'recall', 'f1'],
                                 index=components_grid)
    print("Metrics dependency on number of PCA components:\n", metric_values)
    metric_values.to_csv('./experiments_results/pca_metrics.csv')
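The train-then-evaluate boilerplate above reappears almost verbatim in Example #12 below. As a sketch only, assuming nothing beyond the module-level settings, init, train, evaluate_anomaly_detection, and covariance_similarity already used above, the sweep could be factored out like this:

def sweep_param(param_name, grid, device, base='SWaT'):
    # train and evaluate once per grid value, varying a single setting
    rows = []
    for value in grid:
        params = settings[base].copy()
        params[param_name] = value
        train_params, _ = init(params, device)
        train(**train_params,
              visualize_generation=False,
              evaluate_model=False,
              save_model=False)
        dset = params['data'](params['normal_data_path'],
                              params['abnormal_data_path'],
                              normal_label=params['normal_label'],
                              abnormal_label=params['abnormal_label'],
                              seq_length=params['seq_length'],
                              seq_step=params['seq_step'],
                              num_signals=params['num_signals'])
        loader = torch.utils.data.DataLoader(
            dset.all_data, batch_size=params['batch_size'], shuffle=True)
        rows.append(evaluate_anomaly_detection(
            loader, train_params['generator'], train_params['discriminator'],
            torch.optim.RMSprop, covariance_similarity, 1e-3, 100,
            params['latent_dim'], params['lambda'], params['tau'],
            params['normal_label'], device))
    return pd.DataFrame(rows, columns=['precision', 'recall', 'f1'], index=grid)

# pca_components above then reduces to:
# sweep_param('num_signals', list(range(1, 11)), device)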
Example #3
def main():
    # initialize configuration parameters
    config = init()
    config.set_seed()

    # load trained model
    model = PolyNet(config)
    if os.path.isfile('./trained_models/trainedModel.pth'):
        state_dict = torch.load('./trained_models/trainedModel.pth',
                                map_location=torch.device('cpu'))
        model.load_state_dict(state_dict)
    else:
        raise FileNotFoundError('./trained_models/trainedModel.pth')

    # initialize testing dataset

    datapath = os.path.join(config.path, 'test')
    testdata = ReadDataset(datapath, randomflip=False)
    test_loader = DataLoader(dataset=testdata,
                             batch_size=1,
                             shuffle=True,
                             num_workers=1)
    # testing
    test(model, test_loader, config, datapath)

    print('calculating AP...')
    evalCOCO()  # COCO measure for AP evaluation, averaged on 10 iou threshold
    # calmAP()# traditional AP evaluation, AP is computed at a single IoU of .50
    print('test finished')
Example #4
def main():
    from train import init
    func, config = init()

    def runner(imgs):
        return func(0, config, 'inference', imgs=torch.Tensor(np.float32(imgs)))['preds']

    def do(img, c, s):
        ans = inference(img, runner, config, c, s)
        if len(ans) > 0:
            ans = ans[:, :, :3]

        # ans has shape (N, 16, 3): (num preds, joints, x/y/visible)
        pred = []
        for i in range(len(ans)):
            pred.append({'keypoints': ans[i, :, :]})
        return pred

    gts = []
    preds = []
    normalizing = []
    
    num_eval = config['inference']['num_eval']
    num_train = config['inference']['train_num_eval']
    for anns, img, c, s, n in get_img(config, num_eval, num_train):
        gts.append(anns)
        pred = do(img, c, s)
        preds.append(pred)
        normalizing.append(n)

    mpii_eval(preds, gts, normalizing, num_train)
Example #5
def main():
    from train import init
    func, config = init()
    mode = config['opt'].mode

    def runner(imgs):
        return func(0, config, 'inference', imgs=torch.Tensor(np.float32(imgs)))['preds']

    def do(img):
        ans, scores = multiperson(img, runner, mode)
        if len(ans) > 0:
            ans = ans[:, :, :3]

        pred = genDtByPred(ans)

        for i, score in zip(pred, scores):
            i['score'] = float(score)
        return pred

    gts = []
    preds = []

    idx = 0
    for anns, img in get_img(inp_res=-1):
        idx += 1
        gts.append(anns)
        preds.append(do(img))

    prefix = os.path.join('exp', config['opt'].exp)
    coco_eval(prefix, preds, gts)
Example #6
def train_model(num_epochs,
                freeze_layers_number,
                auto_load_finetune=False,
                visual=False):
    try:
        print("Training...")
        train.init()
        train.train(num_epochs,
                    freeze_layers_number,
                    auto_load_finetune=auto_load_finetune,
                    visual=visual)
    except Exception as e:
        print(e)
        traceback.print_exc()
    finally:
        print("finally")
        util.unlock()
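A call might look like train_model(num_epochs=10, freeze_layers_number=80); both values are placeholders. The try/finally suggests the caller acquires a lock that util.unlock() releases.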
Example #7
def train_models(device):
    for dataset_name in ['kdd99', 'WADI', 'SWaT']:
        train_params, dset = init(settings[dataset_name], device)
        train(**train_params,
              visualize_generation=True,
              evaluate_model=True,
              save_model=False,
              model_name=dataset_name)

        with open('./experiments_results/model_' + dataset_name + '.pkl',
                  'wb') as f:
            pickle.dump([
                train_params['generator'], train_params['discriminator'],
                train_params['settings']
            ], f)
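A minimal sketch of reading one of these bundles back, mirroring the pickle.dump layout above (the dataset name is just an example):

import pickle

# the file contains [generator, discriminator, settings], in that order
with open('./experiments_results/model_SWaT.pkl', 'rb') as f:
    generator, discriminator, model_settings = pickle.load(f)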
Example #8
def init(logdir, test_set, use_batch):
    import train
    train.logdir = logdir
    train.init()
    if test_set:
        from lib.sampler import VOCDetection, data_loader, batch_data_loader
        # from lib.consts import voc_test_data_dir, voc_test_ann_dir, transform
        from lib.consts import voc_root, transform
        from lib.consts import low_memory
        voc_test = VOCDetection(root=voc_root,
                                split='test',
                                transform=transform,
                                flip=False,
                                no_diff=False)
        voc_test.mute = True
        if use_batch:
            batch_size = 8 if low_memory else 32
            loader = batch_data_loader(voc_test, batch_size=batch_size)
        else:
            loader = data_loader(voc_test, shuffle=False)
    else:
        train.voc_train.mute = True
        loader = train.loader_train
    return train.model, loader
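A hedged usage sketch for this helper; the logdir value is an assumption:

# batched evaluation over the VOC test split
model, loader = init('result', test_set=True, use_batch=True)
model.eval()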
Example #9
def evaluate_interpolate_data(args, config):

    x, y, w = load_data(config)

    device, kwargs = train.init(args)

    pt = torch.load(args.evaluate_data)
    x_i = torch.from_numpy(pt['sim_xp']).float().to(device)
    y_j = x[1].to(device)

    ot_solver = SamplesLoss("sinkhorn",
                            p=2,
                            blur=config.sinkhorn_blur,
                            scaling=config.sinkhorn_scaling)
    loss_xy = ot_solver(x_i, y_j)

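    # drop into the debugger here to inspect loss_xy interactively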
    import pdb
    pdb.set_trace()
Example #10
def run():

    transform, computing_device, extras, seed = train.init(config['seed'])

    if not config['test']:

        train.train(config['model_name'], seed, computing_device,
                    config['num_epochs'], config['k'], config['learning_rate'],
                    config['batch_size'], config['num_mb'],
                    config['wvlt_transform'], config['p_test'], transform,
                    extras, config['outname'])

    else:

        train.test(config['model_name'], seed, computing_device,
                   config['num_epochs'], config['k'], config['learning_rate'],
                   config['batch_size'], config['num_mb'],
                   config['wvlt_transform'], config['p_test'], transform,
                   extras, config['outname'])
Example #11
def main():
    from train import init
    func, config = init()  # build the model and load its weights
    mode = config['opt'].mode

    def runner(imgs):
        return func(0,
                    config,
                    'inference',
                    imgs=torch.Tensor(np.float32(imgs)))['preds']

    def do(img):
        ans, scores = multiperson(img, runner, mode)  # [N, 17, 5], [N]
        if len(ans) > 0:
            ans = ans[:, :, :3]  # [N, 17, 3] -- x, y, value

        # generate json-style detection entries for the output
        pred = genDtByPred(ans)

        for i, score in zip(pred, scores):
            i['score'] = float(score)
        return pred  # predictions for this image

    gts = []
    preds = []

    idx = 0
    for anns, img in get_img(inp_res=-1):  # returns the image without rescaling
        idx += 1
        gts.append(anns)  # note: these are multi-person annotations
        preds.append(do(img))  # predictions for this image
        if True:  # always-on visualization block
            img_tmp = img.copy()
            for i in preds[idx - 1]:  # for each detected person in this image
                draw_limbs(img_tmp, i['keypoints'])
            plt.imshow(img_tmp)
            plt.show()
            cv2.imwrite('{}.jpg'.format(idx), img_tmp[:, :, ::-1])

    prefix = os.path.join('exp', config['opt'].exp)
    coco_eval(prefix, preds, gts)
Example #12
def seq_length(device):
    for dataset_name in ['WADI', 'SWaT']:
        metric_values = []
        seq_length_grid = [30, 60, 90, 120, 150, 180, 210, 240, 270, 300]
        for seq_length in seq_length_grid:
            params = settings[dataset_name].copy()
            params['seq_length'] = seq_length
            train_params, dset = init(params, device)
            train(**train_params,
                  visualize_generation=False,
                  evaluate_model=False,
                  save_model=False)

            dset = params['data'](params['normal_data_path'],
                                  params['abnormal_data_path'],
                                  normal_label=params['normal_label'],
                                  abnormal_label=params['abnormal_label'],
                                  seq_length=params['seq_length'],
                                  seq_step=params['seq_step'],
                                  num_signals=params['num_signals'])
            evaluate_loader = torch.utils.data.DataLoader(
                dset.all_data, batch_size=params['batch_size'], shuffle=True)

            metric_values.append(
                evaluate_anomaly_detection(
                    evaluate_loader, train_params['generator'],
                    train_params['discriminator'], torch.optim.RMSprop,
                    covariance_similarity, 1e-3, 100, params['latent_dim'],
                    params['lambda'], params['tau'], params['normal_label'],
                    device))

        metric_values = pd.DataFrame(metric_values,
                                     columns=['precision', 'recall', 'f1'],
                                     index=seq_length_grid)
        print(
            "Metrics dependency on sequence length ({}):\n".format(
                dataset_name), metric_values)
        metric_values.to_csv(
            './experiments_results/seq_length_metrics_{0}.csv'.format(
                dataset_name))
Example #13
def generate(test_set, is_train=True):
    func, config = net.init()

    for idx in tqdm(test_set):
        img = ds.load_image(idx, is_train)
        #imsave('./save/origin_{}.png'.format(idx), img)
        img = (img / 255 - 0.5) * 2
        img = img.astype(np.float32)
        img = img[None, :, :, :]
        img = torch.FloatTensor(img)
        output = func(-1, config, phase='inference', imgs=img)
        pred = torch.FloatTensor(output['preds'][0][:, -1])
        pred = pred[0, :, :, :]
        pred = pred.permute(1, 2, 0)
        # compute the per-pixel L2 norm once, so later channels are not
        # divided by an already partially normalized tensor
        norm = torch.norm(pred, 2, 2)
        for i in range(3):
            pred[:, :, i] = pred[:, :, i] / norm

        pred = (pred / 2 + 0.5) * 255
        pred = pred.numpy()
        pred = pred.astype(np.uint8)
        #pred = imresize(pred, (128, 128))
        imsave('./save/{}.png'.format(idx), pred)
Example #14
def main():
    # get the config and the train/inference function (the loss lives inside it)
    from train import init
    func, config = init()
    # 'opt' holds the parsed command-line options; 'mode' selects
    # single or multi mode for evaluation
    mode = config['opt'].mode

    def runner(imgs):
        # runner wraps the function built by train.py's make_train;
        # it runs the network in eval mode
        return func(0,
                    config,
                    'inference',
                    imgs=torch.Tensor(np.float32(imgs)))['preds']

    def do(img):
        ans, scores = multiperson(img, runner, mode)
        if len(ans) > 0:
            ans = ans[:, :, :3]

        pred = genDtByPred(ans)

        for i, score in zip(pred, scores):
            i['score'] = float(score)
        return pred

    gts = []
    preds = []

    idx = 0
    for anns, img in get_img(inp_res=-1):
        idx += 1
        gts.append(anns)
        preds.append(do(img))

    prefix = os.path.join('exp', config['opt'].exp)
    coco_eval(prefix, preds, gts)
Example #15
def main():
    from train import init
    func, config = init()

    def runner(imgs):
        return func(0, config, 'inference', imgs=torch.Tensor(np.float32(imgs)))['preds']

    def do(img):
        ans = inference(img, runner, config)
        if len(ans) > 0:
            ans = ans[:, :, :3]

        # ans has shape (N, 16, 3): (num preds, joints, x/y/visible)
        pred = []
        for i in range(len(ans)):
            pred.append({'keypoints': ans[i, :, :]})
        return pred

    gts = []
    preds = []

    for anns, img in get_img():
        gts.append(anns)
        preds.append(do(img))
Example #16
from models import GreedySearchDecoder, chat
from settings import TRAIN_BEFORE_CHAT
from train import init, trainWrapper

print("Initialising...")
(encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, voc,
 pairs_of_sentences, checkpoint) = init()
print("Done")
print("Starting Training!")
if TRAIN_BEFORE_CHAT:
    trainWrapper(voc, pairs_of_sentences, encoder, decoder, encoder_optimizer,
                 decoder_optimizer, embedding, checkpoint)
# Chat!
encoder.eval()
decoder.eval()
# Initialize search module
searcher = GreedySearchDecoder(encoder, decoder)
chat(encoder, decoder, searcher, voc)
Example #17
                       interpolation=interpolation)

            plt.subplot(222).set_title('Midcurve')
            plt.subplot(222).axis('off')
            plt.imshow(midcurve_img[:, :, 0],
                       cmap='gray',
                       interpolation=interpolation)

            plt.subplot(223).set_title('ST1 Polygon (R)')
            plt.subplot(223).axis('off')
            plt.imshow(recon[:, :, 0],
                       cmap='gray',
                       interpolation=interpolation)

            plt.subplot(224).set_title('ST1 Midcurve')
            plt.subplot(224).axis('off')
            plt.imshow(midc[:, :, 0], cmap='gray', interpolation=interpolation)

            plt.savefig('results/' + file, dpi=100)

        print(file)


if __name__ == "__main__":
    generators = init()
    generators[0].load_weights('weights/stage1/5_gen_epochs.h5')
    generators[1].load_weights('weights/stage2/2_gen_epochs.h5')

    generate_test_results(generators[0], generators[1],
                          BASE_DIR + 'data/test/', None)
Example #18
import base64
import uuid

from PIL import Image
from flask import Flask, render_template, request
import face_recognition
from flask_script import Manager
import train


app = Flask(__name__)
# manager = Manager(app)
app.config['SECRET_KEY'] = "ddddd"
train.init()


@app.route('/')
def index():
    return render_template("index.html")


@app.route('/pichandler', methods=['post', 'get'])
def pichandler():
    name = ""
    locations =""

    pic_data_url = request.form.get("picdata")
    # print(">>>>>>>",request.form.get('picdata'))
    imgdata = base64.b64decode(pic_data_url.split(',')[1])
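    # The handler is truncated above. As a sketch only, the decoded bytes
    # could be persisted and passed to face_recognition like this; the file
    # naming and the plain-text response are assumptions, not the original code.
    fname = 'upload_{}.png'.format(uuid.uuid4().hex)
    with open(fname, 'wb') as f:
        f.write(imgdata)
    image = face_recognition.load_image_file(fname)
    locations = face_recognition.face_locations(image)
    return str(locations)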
Example #19
def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('mode', type=str, nargs=1)
    parser.add_argument('--logdir', type=str, default='result')
    parser.add_argument('--filedir', '-d', type=str)
    parser.add_argument('--savedir', '-s', type=str)
    parser.add_argument('--format', '-f', type=str, default='png')
    args = parser.parse_args()

    train.logdir = args.logdir
    train.init()
    model = train.model
    model.eval()

    color_set = np.random.uniform(size=(num_classes, 3))

    savedir = args.savedir
    img_format = args.format
    counter = [0]
    dpi = 200

    def inner(img):
        image, results = predict_raw(model, img)
        if counter[0] == 0:
            h, w = image.shape[:2]
            plt.figure(figsize=(w / dpi, h / dpi))
        plt.cla()
        visualize_raw(image, results, color_set=color_set)
        plt.subplots_adjust(top=1,
                            bottom=0,
                            right=1,
                            left=0,
                            hspace=0,
                            wspace=0)
        plt.margins(0, 0)
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        if savedir:
            plt.savefig('{}.{}'.format(
                os.path.join(savedir, '%06d' % counter[0]), img_format),
                        format=img_format,
                        dpi=dpi,
                        bbox_inches='tight',
                        pad_inches=0)
        counter[0] += 1
        plt.pause(0.03)

    mode = args.mode[0]
    if mode == 'camera':
        camera(inner)
    elif mode == 'image':
        if not args.filedir:
            print('--filedir required')
            sys.exit(1)
        image(args.filedir, inner)
    elif mode == 'video':
        if not args.filedir:
            print('--filedir required')
            sys.exit(1)
        video(args.filedir, inner)
    else:
        print('unrecognized mode {}'.format(mode))
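Assuming the script is saved as detect.py (the name is an assumption), typical invocations are: python detect.py camera, python detect.py image -d path/to/image, or python detect.py video -d path/to/video -s out/ -f jpg.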
Example #20
import os

import test
import train

if __name__ == "__main__":
    frompath = "train/"
    savepath = "tmp"
    topath = "tmp/"
    os.system("if [ ! -d %s ]; then mkdir %s; fi" % (savepath, savepath))
    filelist = os.listdir(frompath)
    group_ix = 0
    group_num = 10
    fp = open("tmp/stat.dat", "w")
    total_rate = {}
    for group_ix in range(group_num):
        print("Group %d:" % group_ix)
        train.init()
        train.get_stop_words("stop_words_ch.txt")
        test_filelist = []
        train_filelist = []
        train_filenum = 0
        test_filenum = 0

        for file_ix in range(len(filelist)):
            if file_ix % group_num == group_ix:
                test_filelist.append(filelist[file_ix])
                test_filenum += 1
            else:
                train_filelist.append(filelist[file_ix])
                train_filenum += 1

        train.count(frompath, train_filenum, train_filelist)
Example #21
def evaluate_fate(args, config):

    # -- load data

    data_pt = torch.load(os.path.join(config.data_dir, 'fate_test.pt'))
    x = data_pt['x']
    y = data_pt['y']
    t = data_pt['t']

    ay_path = os.path.join(config.data_dir, '50_20_10')
    ay = annoy.AnnoyIndex(config.x_dim, 'euclidean')
    ay.load(ay_path + '.ann')
    with open(ay_path + '.txt', 'r') as f:
        cy = np.array([line.strip() for line in f])

    # -- initialize

    device, kwargs = train.init(args)

    # -- model

    model = train.AutoGenerator(config)

    log_str = '{} {:.5f} {:.3e} {:.5f} {:.3e} {:d}'
    log_handle = open(os.path.join(config.out_dir, 'fate.log'), 'w')

    names_ = []
    scores_ = []
    masks_ = []

    train_pts = sorted(glob.glob(config.train_pt.format('*')))
    for train_pt in train_pts:

        name = os.path.basename(train_pt).split('.')[1]

        checkpoint = torch.load(train_pt)
        print('Loading model from {}'.format(train_pt))
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        print(model)

        # -- evaluate
        torch.manual_seed(0)

        time_elapsed = config.t
        num_steps = int(np.round(time_elapsed / config.train_dt))

        scores = []
        mask = []
        pbar = tqdm.tqdm(range(len(x)), desc="[fate:{}]".format(name))
        for i in pbar:

            # expand data point
            x_i = x[i].expand(config.ns, -1).to(device)

            # simulate forward
            for _ in range(num_steps):
                z = torch.randn(x_i.shape[0], x_i.shape[1]) * config.train_sd
                z = z.to(device)
                x_i = model._step(x_i, dt=config.train_dt, z=z)
            x_i_ = x_i.detach().cpu().numpy()

            # predict
            yp = []
            for j in range(x_i_.shape[0]):
                nn = cy[ay.get_nns_by_vector(x_i_[j], 20)]
                nn = Counter(nn).most_common(2)
                label, num = nn[0]
                if len(nn) > 1:
                    _, num2 = nn[1]
                    if num == num2:  # deal with ties by setting it to the default class
                        label = 'Other'
                yp.append(label)
            yp = Counter(yp)

            # may want to save yp instead
            num_neu = yp['Neutrophil'] + 1  # use pseudocounts for scoring
            num_total = yp['Neutrophil'] + yp['Monocyte'] + 2
            score = num_neu / num_total
            scores.append(score)
            num_total = yp['Neutrophil'] + yp['Monocyte']
            mask.append(num_total > 0)

        scores = np.array(scores)
        mask = np.array(mask)

        r, pval = scipy.stats.pearsonr(y, scores)
        r_masked, pval_masked = scipy.stats.pearsonr(y[mask], scores[mask])

        log = log_str.format(name, r, pval, r_masked, pval_masked, mask.sum())
        log_handle.write(log + '\n')
        print(log)

        names_.append(name)
        scores_.append(scores)
        masks_.append(mask)

    log_handle.close()

    torch.save({
        'scores': scores_,
        'mask': masks_,
        'names': names_
    }, os.path.join(config.out_dir, 'fate.pt'))
Example #22
def main():

    # %% Setup

    voc_test = VOCDetection(root=voc_root,
                            split='test',
                            transform=transform,
                            flip=False)
    voc_test.mute = True
    if use_batch:
        loader_test = batch_data_loader(voc_test, 8 if low_memory else 32)
    else:
        loader_test = data_loader(voc_test, shuffle=False)

    train.logdir = args.logdir
    train.init()
    model = train.model
    model.eval()

    try:
        os.mkdir(savedir)
        print('Create new dir')
    except FileExistsError:
        print('Rewrite existing dir "{}"?'.format(savedir), end=' ')
        ans = None
        while ans not in ('', 'yes', 'no'):
            ans = input('[yes]/no: ')
        if ans == 'no':
            os._exit(0)

    open_files()

    # %% Generate .txt results on Pascal VOC 2007

    tic = time()

    if use_batch:
        num_batches = 0
        for img, _, info in loader_test:
            detection = predict_batch(model, img, info)
            num_batches += len(detection)
            process_bar(time() - tic, num_batches, len(loader_test))
            for results, a in zip(detection, info):
                results_to_raw(results, a['scale'], *a['shape'])
                for result in results:
                    append_result(a['image_id'], result['class_idx'],
                                  result['bbox'], result['confidence'])
    else:
        for i, (x, _, a) in enumerate(loader_test):
            results = predict(model, x, a)
            results_to_raw(results, a['scale'], *a['shape'])
            process_bar(time() - tic, i + 1, len(loader_test))
            for result in results:
                append_result(a['image_id'], result['class_idx'],
                              result['bbox'], result['confidence'])

    print('\nUsed time: {:.2f}s'.format(time() - tic))

    # %% The end

    close_files()
Example #23
def evaluate_fit(args, config):

    log_path = os.path.join(config.out_dir, 'interpolate.log')
    if os.path.exists(log_path):
        print(log_path, 'exists. Skipping.')
        return

    x, y, w = load_data(config)

    # -- initialize
    device, kwargs = train.init(args)
    model = train.AutoGenerator(config)

    ot_solver = SamplesLoss("sinkhorn",
                            p=2,
                            blur=config.sinkhorn_blur,
                            scaling=config.sinkhorn_scaling)

    losses_xy = []
    train_pts = sorted(glob.glob(config.train_pt.format('*')))
    for train_pt in train_pts:

        checkpoint = torch.load(train_pt)
        print('Loading model from {}'.format(train_pt))
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        print(model)

        name = os.path.basename(train_pt).split('.')[1]

        # -- evaluate
        torch.manual_seed(0)
        np.random.seed(0)

        for t_cur in config.train_t:

            t_prev = config.start_t
            y_prev = int(y[t_prev])
            y_cur = int(y[t_cur])

            time_elapsed = y_cur - y_prev
            num_steps = int(np.round(time_elapsed / config.train_dt))

            dat_prev = x[t_prev].to(device)
            w_prev = train.get_weight(w[(y_prev, y_cur)],
                                      time_elapsed).cpu().numpy()

            x_s = []
            x_i_ = train.weighted_samp(dat_prev, args.evaluate_n, w_prev)

            for i in range(int(args.evaluate_n / config.ns)):

                x_i = x_i_[i * config.ns:(i + 1) * config.ns, ]

                for _ in range(num_steps):
                    z = torch.randn(x_i.shape[0],
                                    x_i.shape[1]) * config.train_sd
                    z = z.to(device)
                    x_i = model._step(x_i, dt=config.train_dt, z=z)

                x_s.append(x_i.detach())

            x_s = torch.cat(x_s)

            loss_xy = ([name, t_cur] + [
                ot_solver(x_s, x[t_].to(device)).item() for t_ in range(len(x))
            ])
            losses_xy.append(loss_xy)

    losses_xy = pd.DataFrame(losses_xy, columns=['epoch', 't_cur'] + y)
    losses_xy.to_csv(log_path, sep='\t', index=False)
    print('Wrote results to', log_path)