Example #1
0
def train(epoch, models):
    for model in models:
        model.train()

    ep_tot = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            model.global_num += data.size()[0]

            timer = Timer("train", "sample from " + model.name + " with " + model.ty.name, data.size()[0], False)
            lossy = 0
            with timer:
                for s in model.getSpec(data,target):
                    model.optimizer.zero_grad()

                    loss = model.aiLoss(*s, **vargs).sum() / data.size()[0]
                    lossy += loss.item()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                    model.optimizer.step()
                    model.clip_norm()
            
            model.addSpeed(timer.getUnitTime())

            if batch_idx % args.log_interval == 0:
                print(('Train Epoch {:12} {:'+ str(largest_domain) +'}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}').format(
                    model.name,  model.ty.name,
                    epoch, 
                    batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), 
                    model.speed,
                    lossy))
Example #2
0
def surprise_knn_best_params(train_path="datas/train.csv",
                             test_path="datas/test.csv",
                             verbose=True,
                             t=Timer()):
    # reader with rating scale
    reader = Reader(line_format='user item rating',
                    sep=',',
                    rating_scale=(1, 5))
    # load data from df
    data = Dataset.load_from_file(train_path, reader)

    #knn parameters
    ks = np.linspace(40, 200, 9, dtype=np.int64)
    names = ['pearson_baseline', 'pearson', 'msd', 'cosine']
    user_baseds = [True, False]
    params = dict()
    rmses = dict()

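    # Grid search over neighbourhood size k, similarity measure, and user/item-based CF;
    # rmses maps cross-validated RMSE -> parameter set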
    for k in ks:
        params['k'] = k
        for name in names:
            params['name'] = name
            for user_based in user_baseds:
                params['user_based'] = user_based
                algo = KNNBaseline(k=k,
                                   sim_options={
                                       'name': name,
                                       'user_based': user_based
                                   },
                                   verbose=True)
                rmse = surprise_cv_algo(data, algo)
                print(
                    "------Time:{}, rmse: {}, k: {}, name: {}, user_based: {}------\n\n"
                    .format(t.now(), rmse, k, name, user_based))
                rmses[rmse] = dict(params)  # copy; params is reused and mutated across iterations

    # Find the model with least RMSE
    lowest_rmse = min(rmses.keys())
    best_params = rmses[lowest_rmse]

    print("Best knn rmse: {}. Params: k: {}, name: {}, user_based: {}".format(
        lowest_rmse, best_params['k'], best_params['name'],
        best_params['user_based']))
Example #3
0
def run(liczba):

    v = liczba
    wierzcholki = []
    dell = DEL_lkrawedzi(v)
    dfs = DFS_lkrawedz(v)
    for i in range(liczba):
        for z in range(liczba):
            if z>i:
                razem = str(i)+str(z)
                w1 = int(i)
                w2 = int(z)
                dell.addEdge(w1,w2)
                dfs.addEdge(w1,w2)
                wierzcholki.append(razem)


    print("Sortowanie topologiczne przez usuwanie: ")

    timer_sort_topological_del = Timer()
    timer_sort_topological_del.start()

    dell.topologicalSort_DEL()
    klopsy = []
    i = 0
    wierzcholkidell= []
    wierzcholkidfs =[]
    #print(wierzcholki, "soema")
    for znajdz in listadell:
        for iins in wierzcholki:
            if znajdz == int(iins[0]):
                razem = iins[0]+iins[1:len(iins)]
                wierzcholkidell.append(razem)

    pusta =[]
    opal = []
    poprzedni = wierzcholkidell[0][0]
    for znajdz in listadell:
        for iins in wierzcholki:
            if znajdz == int(iins[0]):
                opal.append(iins[1])
            else:
                if opal:
                    klopsy.append(opal)
                opal = []
    if len(klopsy) != len(listadell):
        klopsy.append("['_']")
    glob = 0
    #print("Lista nastepnikow dla sortowania dell:")
    '''for glop in listadell:
        print("(", glop, ") -", klopsy[glob])
        glob = glob +1
    print("\n")'''


    timer_sort_topological_del.stop()



    print("Sortwanie topologiczne metoda dfs: ")

    timer_sort_topological_dfs = Timer()
    timer_sort_topological_dfs.start()

    dfs.topologicalSort_DFS()

    for znajdz in listadfs:
        for iins in wierzcholki:
            if znajdz == int(iins[0]):
                razem = iins[0]+iins[1]
                wierzcholkidfs.append(razem)
    klopsy = []
    pusta =[]
    opal = []
    poprzedni = wierzcholkidell[0][0]
    for znajdz in listadfs:
        for iins in wierzcholki:
            if znajdz == int(iins[0]):
                opal.append(iins[1])
            else:
                if opal:
                    klopsy.append(opal)
                opal = []
    klopsy.append("['_']")
    glob = 0
    #print("Lista nastepnikow dla sortowania dfs:")
    '''for glop in listadfs:
        print("(", glop, ") -", klopsy[glob])
        glob = glob + 1'''

    timer_sort_topological_dfs.stop()

    return {
        'sort_top_del': timer_sort_topological_del.get_mean_time(),
        'sort_top_dfs': timer_sort_topological_dfs.get_mean_time()
    }
Example #4
0
def main1(data=None):
    if not data:
        data = load_inputs_lines('05', 'inputs.txt')
        data = data[0]
        data = [int(d) for d in data.split(',')]

    diagnostics = IntcodeComputer(data, 1)
    diagnostics.run()

    return diagnostics.output


def main2(data=None):
    if not data:
        data = load_inputs_lines('05', 'inputs.txt')
        data = data[0]
        data = [int(d) for d in data.split(',')]

    diagnostics = IntcodeComputer(data, 5)
    diagnostics.run()

    return diagnostics.output


if __name__ == "__main__":
    timer = Timer()
    result1 = main1()
    timer.stop()
    print(result1)
    print(f'{timer.duration} ms')

    timer.start()
    result2 = main2()
    timer.stop()
    print(result2)
    print(f'{timer.duration} ms')
Example #5
0
def test(models, epoch, data_loader, f=None):
    class MStat:
        def __init__(self, model):
            model.eval()
            self.model = model
            self.correct = 0
            self.test_loss = 0

            class Stat:
                def __init__(self, d, dnm):
                    self.domain = d
                    self.name = dnm
                    self.width = 0
                    self.safe = 0
                    self.proved = 0
                    self.time = 0

            self.domains = [
                Stat(getattr(domains, d), d) for d in args.test_domain
            ]

    model_stats = [MStat(m) for m in models]

    num_its = 0
    for data, target in data_loader:
        if num_its >= args.test_size:
            break
        num_its += data.size()[0]
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for m in model_stats:
            with torch.no_grad():
                m.test_loss += m.model.stdLoss(
                    data, None, target).sum().item()  # sum up batch loss

            tyorg = m.model.ty

            with torch.no_grad():
                pred = m.model(data).data.max(1, keepdim=True)[
                    1]  # get the index of the max log-probability
                m.correct += pred.eq(target.data.view_as(pred)).sum()

            for stat in m.domains:
                timer = Timer(shouldPrint=False)
                with timer:
                    m.model.ty = stat.domain

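                    # Point domains: compare concrete predictions (proved/safe counts);
                    # abstract domains: accumulate interval width and isSafeDom checks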
                    def calcData(data, target):
                        box = m.model.boxSpec(data, target)[0]
                        with torch.no_grad():
                            if m.model.ty in POINT_DOMAINS:
                                preder = m.model(box[0]).data
                                pred = preder.max(
                                    1, keepdim=True
                                )[1]  # get the index of the max log-probability
                                org = m.model(data).max(1, keepdim=True)[1]
                                stat.proved += float(org.eq(pred).sum())
                                stat.safe += float(
                                    pred.eq(target.data.view_as(pred)).sum())
                            else:
                                bs = m.model(box[1])
                                stat.width += m.model.widthL(bs).data[
                                    0]  # sum up batch loss
                                stat.safe += m.model.isSafeDom(
                                    bs, target).sum().item()
                                stat.proved += sum([
                                    m.model.isSafeDom(bs,
                                                      (h.ones(target.size()) *
                                                       n).long()).sum().item()
                                    for n in range(num_classes)
                                ])

                    if m.model.net.neuronCount(
                    ) < 5000 or stat.domain in SYMETRIC_DOMAINS:
                        calcData(data, target)
                    else:
                        for d, t in zip(data, target):
                            calcData(d.unsqueeze(0), t.unsqueeze(0))
                stat.time += timer.getUnitTime()
            m.model.ty = tyorg

    l = num_its  # len(test_loader.dataset)
    for m in model_stats:

        pr_corr = float(m.correct) / float(l)
        if args.use_schedule:
            m.model.lrschedule.step(1 - pr_corr)

        h.printBoth(
            'Test: {:12} trained with {:8} - Mult {:1.8f}, Avg sec/ex {:1.12f}, Average loss: {:8.4f}, Accuracy: {}/{} ({:3.1f}%)'
            .format(m.model.name, m.model.ty.name,
                    m.model.getMult().data[0], m.model.speed, m.test_loss / l,
                    m.correct, l, 100. * pr_corr), f)

        model_stat_rec = ""
        for stat in m.domains:
            pr_safe = stat.safe / l
            pr_proved = stat.proved / l
            pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
            h.printBoth(
                "\t{:10} - Width: {:<22.4f} Pr[Proved]={:<1.3f}  Pr[Corr and Proved]={:<1.3f}  Pr[Corr|Proved]={:<1.3f}    Time = {:<7.5f}"
                .format(stat.name, stat.width / l, pr_proved, pr_safe,
                        pr_corr_given_proved, stat.time), f)
            model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(
                stat.name, pr_proved, pr_safe, pr_corr_given_proved)
        net_file = os.path.join(
            out_dir, m.model.name + "_checkpoint_" + str(epoch) +
            "_with_{:1.3f}".format(pr_corr) + "__" + model_stat_rec + ".net")

        h.printBoth("\tSaving netfile: {}\n".format(net_file), f)

        if epoch == 1 or epoch % 10 == 0:
            torch.save(m.model.net, net_file)
Example #6
0

def adjust_learning_rate(optimizer, epoch):
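    # Step decay: multiply the base lr by 0.1 after 70% of epochs and by 0.01 after 85%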
    lr = args.lr
    if epoch >= 0.85 * args.epochs:
        lr = args.lr * 0.01
    elif epoch >= 0.7 * args.epochs:
        lr = args.lr * 0.1
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


with open(os.path.join(out_dir, "log.txt"), "w") as f:

    startTime = timer()
    for epoch in range(1, args.epochs + 1):
        for model in models:
            adjust_learning_rate(model.optimizer, epoch)
            for param_group in model.optimizer.param_groups:
                print(param_group['lr'])
        if (epoch - 1) % args.test_freq == 0:
            with Timer("test before epoch " + str(epoch), "sample", 10000):
                print('check for training data:')
                test(models, epoch, train_loader, f)
                print('check for test data: ')
                test(models, epoch, test_loader, f)

        h.printBoth("Elapsed-Time: {:.2f}s\n".format(timer() - startTime), f)
        with Timer("train", "sample", 60000):
            train(epoch, models)
Example #7
0
def test(models, epoch, f=None):
    global num_tests
    num_tests += 1

    class MStat:
        def __init__(self, model):
            model.eval()
            self.model = model
            self.correct = 0

            class Stat:
                def __init__(self, d, dnm):
                    self.domain = d
                    self.name = dnm
                    self.width = 0
                    self.max_eps = None
                    self.safe = 0
                    self.proved = 0
                    self.time = 0

            self.domains = [
                Stat(h.parseValues(d, goals), h.catStrs(d))
                for d in args.test_domain
            ]

    model_stats = [MStat(m) for m in models]
    dict_map = dict(np.load("./dataset/AG/dict_map.npy").item())
    lines = open("./dataset/en.key1").readlines()
    adjacent_keys = [[] for i in range(len(dict_map))]
    for line in lines:
        tmp = line.strip().split()
        ret = set(tmp[1:]).intersection(dict_map.keys())
        ids = []
        for x in ret:
            ids.append(dict_map[x])
        adjacent_keys[dict_map[tmp[0]]].extend(ids)

    num_its = 0
    saved_data_target = []
    for data, target in test_loader:
        if num_its >= args.test_size:
            break

        if num_tests == 1:
            saved_data_target += list(zip(list(data), list(target)))

        num_its += data.size()[0]
        if num_its % 100 == 0:
            print(num_its, model_stats[0].domains[0].safe * 100.0 / num_its)
        if args.test_swap_delta > 0:
            length = data.size()[1]
            data = data.repeat(1, length)
            for i in data:
                for j in range(length - 1):
                    for _ in range(args.test_swap_delta):
                        t = np.random.randint(0, length)
                        while len(adjacent_keys[int(i[t])]) == 0:
                            t = np.random.randint(0, length)
                        cid = int(i[t])
                        i[j * length + t] = adjacent_keys[cid][0]
            target = (target.view(-1, 1).repeat(1, length)).view(-1)
            data = data.view(-1, length)

        if h.use_cuda:
            data, target = data.cuda().to_dtype(), target.cuda()

        for m in model_stats:

            with torch.no_grad():
                pred = m.model(data).vanillaTensorPart().max(1, keepdim=True)[
                    1]  # get the index of the max log-probability
                m.correct += pred.eq(target.data.view_as(pred)).sum()

            for stat in m.domains:
                timer = Timer(shouldPrint=False)
                with timer:

                    def calcData(data, target):
                        box = stat.domain.box(data,
                                              w=m.model.w,
                                              model=m.model,
                                              untargeted=True,
                                              target=target).to_dtype()
                        with torch.no_grad():
                            bs = m.model(box)
                            org = m.model(data).vanillaTensorPart().max(
                                1, keepdim=True)[1]
                            stat.width += bs.diameter().sum().item(
                            )  # sum up batch loss
                            stat.proved += bs.isSafe(org).sum().item()
                            stat.safe += bs.isSafe(target).sum().item()
                            # stat.max_eps += 0 # TODO: calculate max_eps

                    if m.model.net.neuronCount(
                    ) < 5000 or stat.domain in SYMETRIC_DOMAINS:
                        calcData(data, target)
                    else:
                        if args.test_swap_delta > 0:
                            length = data.size()[1]
                            pre_stat = copy.deepcopy(stat)
                            for i, (d, t) in enumerate(zip(data, target)):
                                calcData(d.unsqueeze(0), t.unsqueeze(0))
                                if (i + 1) % length == 0:
                                    d_proved = (stat.proved -
                                                pre_stat.proved) // length
                                    d_safe = (stat.safe -
                                              pre_stat.safe) // length
                                    d_width = (stat.width -
                                               pre_stat.width) / length
                                    stat.proved = pre_stat.proved + d_proved
                                    stat.safe = pre_stat.safe + d_safe
                                    stat.width = pre_stat.width + d_width
                                    pre_stat = copy.deepcopy(stat)
                        else:
                            for d, t in zip(data, target):
                                calcData(d.unsqueeze(0), t.unsqueeze(0))
                stat.time += timer.getUnitTime()

    l = num_its  # len(test_loader.dataset)
    for m in model_stats:
        if args.lr_multistep:
            m.model.lrschedule.step()

        pr_corr = float(m.correct) / float(l)
        if args.use_schedule:
            m.model.lrschedule.step(1 - pr_corr)

        h.printBoth(
            ('Test: {:12} trained with {:' + str(largest_domain) +
             '} - Avg sec/ex {:1.12f}, Accuracy: {}/{} ({:3.1f}%)').format(
                 m.model.name, m.model.ty.name, m.model.speed, m.correct, l,
                 100. * pr_corr),
            f=f)

        model_stat_rec = ""
        for stat in m.domains:
            pr_safe = stat.safe / l
            pr_proved = stat.proved / l
            pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
            h.printBoth((
                "\t{:" + str(largest_test_domain) +
                "} - Width: {:<36.16f} Pr[Proved]={:<1.3f}  Pr[Corr and Proved]={:<1.3f}  Pr[Corr|Proved]={:<1.3f} {}Time = {:<7.5f}"
            ).format(
                stat.name, stat.width / l, pr_proved, pr_safe,
                pr_corr_given_proved,
                "AvgMaxEps: {:1.10f} ".format(stat.max_eps / l)
                if stat.max_eps is not None else "", stat.time),
                        f=f)
            model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(
                stat.name, pr_proved, pr_safe, pr_corr_given_proved)
        prepedname = m.model.ty.name.replace(" ", "_").replace(
            ",", "").replace("(", "_").replace(")", "_").replace("=", "_")
        net_file = os.path.join(
            out_dir, m.model.name + "__" + prepedname + "_checkpoint_" +
            str(epoch) + "_with_{:1.3f}".format(pr_corr))

        h.printBoth("\tSaving netfile: {}\n".format(net_file + ".pynet"), f=f)

        if (num_tests % args.save_freq == 1 or args.save_freq
                == 1) and not args.dont_write and (num_tests > 1
                                                   or args.write_first):
            print("Actually Saving")
            torch.save(m.model.net, net_file + ".pynet")
            if args.save_dot_net:
                with h.mopen(args.dont_write, net_file + ".net", "w") as f2:
                    m.model.net.printNet(f2)
                    f2.close()
            if args.onyx:
                nn = copy.deepcopy(m.model.net)
                nn.remove_norm()
                torch.onnx.export(
                    nn,
                    h.zeros([1] + list(input_dims)),
                    net_file + ".onyx",
                    verbose=False,
                    input_names=["actual_input"] + [
                        "param" + str(i)
                        for i in range(len(list(nn.parameters())))
                    ],
                    output_names=["output"])

    if num_tests == 1 and not args.dont_write:
        img_dir = os.path.join(out_dir, "images")
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for img_num, (img, target) in zip(
                range(args.number_save_images),
                saved_data_target[:args.number_save_images]):
            sz = ""
            for s in img.size():
                sz += str(s) + "x"
            sz = sz[:-1]

            img_file = os.path.join(
                img_dir, args.dataset + "_" + sz + "_" + str(img_num))
            if img_num == 0:
                print("Saving image to: ", img_file + ".img")
            with open(img_file + ".img", "w") as imgfile:
                flatimg = img.view(h.product(img.size()))
                for t in flatimg.cpu():
                    print(decimal.Decimal(float(t)).__format__("f"),
                          file=imgfile)
            with open(img_file + ".class", "w") as imgfile:
                print(int(target.item()), file=imgfile)
Example #8
0
    ] for d in args.domain])

patience = 30
last_best_origin = 0
best_origin = 1e10
last_best = 0
best = 1e10
decay = True
with h.mopen(args.dont_write, os.path.join(out_dir, "log.txt"), "w") as f:
    startTime = timer()
    for epoch in range(1, args.epochs + 1):
        if f is not None:
            f.flush()
        if (epoch - 1) % args.test_freq == 0 and (epoch > 1
                                                  or args.test_first):
            with Timer("test all models before epoch " + str(epoch), 1):
                test(models, epoch, f)
                if f is not None:
                    f.flush()
        h.printBoth("Elapsed-Time: {:.2f}s\n".format(timer() - startTime), f=f)
        if args.epochs <= args.test_freq:
            break
        with Timer("train all models in epoch", 1, f=f):
            val_origin, val = train(epoch, models, decay)
            h.printBoth("Original val loss: %.2f\t Val loss: %.2f\n" %
                        (val_origin, val),
                        f=f)
            if decay:
                if val_origin < best_origin:
                    best_origin = val_origin
                    last_best_origin = epoch
Example #9
0
def reconstruct(settings):
    timer_global = Timer()

    # LOAD SETTINGS
    SKIP_LOAD_IMAGES = settings["SKIP_LOAD_IMAGES"]
    SKIP_FIND_FEATURES = settings["SKIP_FIND_FEATURES"]
    SKIP_FIND_MATCHES = settings["SKIP_FIND_MATCHES"]
    SKIP_RECONSTRUCT = settings["SKIP_RECONSTRUCT"]
    SKIP_MERGE = settings["SKIP_MERGE"]
    SKIP_SPARSE_CONVERT = settings["SKIP_SPARSE_CONVERT"]

    # VALIDATE PATHS
    SCRIPT_DIR = settings["SCRIPT_DIR"].rstrip("/")
    assert os.path.exists(SCRIPT_DIR), "ERROR: SCRIPT PATH MISSING AT {}".format(SCRIPT_DIR)

    WORK_DIR = settings["WORK_DIR"].rstrip("/")
    if not os.path.exists(WORK_DIR):
        os.mkdir(WORK_DIR)

    IMAGE_DIR = "images"
    IMAGE_DIR = "{}/{}".format(WORK_DIR, IMAGE_DIR)
    if not os.path.exists(IMAGE_DIR):
        os.mkdir(IMAGE_DIR)

    FRAME_WIDTH = settings["FRAME_WIDTH"]
    FRAME_HEIGHT = settings["FRAME_HEIGHT"]
    if FRAME_WIDTH < 2 * FRAME_HEIGHT:
        FRAME_WIDTH = 2 * FRAME_HEIGHT
    FRAME_FOV = settings["FRAME_FOV"]
    IMAGE_SCALE = settings["IMAGE_SCALE"]


    # VIDEO TIME SETTINGS
    try:
        TIME_START = int(settings["TIME_START"])
        TIME_END = int(settings["TIME_END"])
    except KeyError:
        TIME_START = 0
        TIME_END = int(math.floor(extractor.getEnd()))
    TIME_INTERVAL = settings["TIME_INTERVAL"]
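    # Number of frames to extract, one every TIME_INTERVAL seconds between TIME_START and TIME_END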
    NUM_FRAMES = int(math.floor((TIME_END - TIME_START) / float(TIME_INTERVAL)))

    # GET PATHS TO VIDEOS
    # VIDEO_PATHS = [settings["VIDEO_PATH"] + path for path in settings["VIDEO_FILES"]]
    # for video in VIDEO_PATHS:
    #     assert os.path.exists(video), "ERROR: VIDEO MISSING AT {}".format(video)
    # VIDEO_PREFIX = settings["VIDEO_FILES"][0].split('_')[0]
    VIDEO_PATH = settings["VIDEO_PATH"]
    assert os.path.exists(VIDEO_PATH), "ERROR: VIDEO MISSING AT {}".format(VIDEO_PATH)
    VIDEO_PREFIX = VIDEO_PATH.split("/")[-1].split(".")[0]

    FIT_REF = settings["FIT_REF"]
    assert os.path.exists(FIT_REF), "ERROR: FIT REF MISSING AT {}".format(FIT_REF)

    if not SKIP_LOAD_IMAGES or not SKIP_FIND_FEATURES:
        # CREATE FRAME EXTRACTOR OBJECT
        print("\nLOADING VIDEOS... {}".format(VIDEO_PATH))
        extractor = VideoExtractor(
            VIDEO_PATH,
            FIT_REF,
            IMAGE_SCALE,
            frame_width=FRAME_WIDTH,
            frame_height=FRAME_HEIGHT,
            fov=FRAME_FOV
            )

    log = LogWriter(WORK_DIR)
    log.heading("IMAGE LOADING")

    if not SKIP_LOAD_IMAGES:
        timer = Timer()

        log.log("Using video:")
        # for path in VIDEO_PATHS:
        #     log.log(path)
        log.log(VIDEO_PATH.split("\\")[-1])

        # ITERATE THROUGH FRAMES
        frame_data = {}
        subflag = "NESW"

        print("\nEXTRACTING FRAMES:\nSTART: {} sec\nEND: {} sec\nINTERVAL: {} sec\n").format(TIME_START, TIME_END, TIME_INTERVAL)

        img_counter = 0
        for frame in range(NUM_FRAMES):

            frame_ID = "{:05d}".format(frame)

            t = TIME_START + frame*TIME_INTERVAL
            data = extractor.extract(t)
            frames = data[:-1]
            record = data[-1]

            file_names = ["{}_{}_{}.jpg".format(VIDEO_PREFIX, subflag[i], frame_ID) for i in range(len(frames))]

            print("GENERATING FRAMES @ {d} sec".format(t))

            frame_data[frame_ID] = record

            file_paths = ["{}/{}".format(IMAGE_DIR, file_name) for file_name in file_names]

            for i,file_path in enumerate(file_paths):
                # print file_path
                cv2.imwrite(file_path, frames[i])
                img_counter += 1

            # if exif_exists:
                # extractor.writeEXIF(file_paths, record)

        # SAVE FRAME DATA
        with open("{}/image_data.json".format(WORK_DIR), mode='w') as f:
            json.dump(frame_data, f, sort_keys=True, indent=4, separators=(',', ': '))

        log.log("Successfully loaded {} images ({} sec)".format(img_counter, timer.read()))
    else:
        log.log("Skipped")


    log.heading("FEATURE EXTRACTION")
    if not SKIP_FIND_FEATURES:
        timer = Timer()

        if os.path.exists("{}/database.db".format(WORK_DIR)):
            os.remove("{}/database.db".format(WORK_DIR))

        # frame_dims = extractor.getDims()
        frame_dims = [FRAME_WIDTH, FRAME_HEIGHT]
        camera_model = settings["CAMERA_MODEL"]
        # camera_intrinsics = [settings["CAMERA_FOCAL_LENGTH"]] + [frame_dims[0]/2, frame_dims[1]/2] + settings["CAMERA_PARAMS"]
        camera_intrinsics = [settings["CAMERA_FOCAL_LENGTH"]] + [frame_dims[0]/2, frame_dims[1]/2]
        if settings["CAMERA_PARAMS"]:
            camera_intrinsics.extend(settings["CAMERA_PARAMS"])  # CAMERA_PARAMS is assumed to be a list (see commented-out line above)

        camera_intrinsics_str = ",".join([str(d) for d in camera_intrinsics])

        raw_input = "{}/shell/feature_extract.sh {} {} {}".format(SCRIPT_DIR, WORK_DIR, camera_model, camera_intrinsics_str)
        args = shlex.split(raw_input)
        p = subprocess.Popen(args)
        p.wait()

        log.log("Feature extraction ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")


    log.heading("IMAGE MATCHING")
    if not SKIP_FIND_MATCHES:
        timer = Timer()

        # FEATURE MATCHING
        VOCAB_TREE = "{}/vocab_tree/vocab_tree.bin".format(SCRIPT_DIR)
        raw_input = "{}/shell/match_vocabtree.sh {} {}".format(SCRIPT_DIR, WORK_DIR, VOCAB_TREE)
        args = shlex.split(raw_input)
        p = subprocess.Popen(args)
        p.wait()

        log.log("Image matching finished ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")


    log.heading("MODEL RECONSTRUCTION")
    if not SKIP_RECONSTRUCT:
        timer = Timer()

        if not os.path.exists("{}/sparse".format(WORK_DIR)):
            os.mkdir("{}/sparse".format(WORK_DIR))

        target_num = settings["FRAMES_PER_MODEL"]
        overlap = settings["MODEL_OVERLAP"]
        num_chunks = int(round(float((NUM_FRAMES-overlap))/(target_num-overlap)))
        remainder = NUM_FRAMES - (target_num*num_chunks - overlap*num_chunks + overlap)

        log.log("Number of frames in model: {}".format(NUM_FRAMES))
        log.log("Target model size: {}".format(target_num))
        log.log("Model overlap: {}".format(overlap))
        log.log("Models to generate: {}".format(num_chunks))

        models = range(num_chunks)
        # OVERRIDE TO REGENERATE SPECIFIC MODELS
        # models = [0]

        for i in models:

            start_frame = i * (target_num - overlap)
            end_frame = start_frame + target_num

            if i == num_chunks-1:
                end_frame += remainder

            # MAKE IMAGE FILE
            print("\n\nCONSTRUCTING IMAGE LIST\n\n")
            make_image_list(start_frame, end_frame, WORK_DIR, VIDEO_PREFIX)

            MODEL_DIR = "{}/{}/{}".format(WORK_DIR, "sparse", i)
            if not os.path.exists(MODEL_DIR):
                os.mkdir(MODEL_DIR)

            timer_model = Timer()

            # RECONSTRUCT SPARSE
            raw_input = "{}/shell/sparse_reconstruct.sh {} {}".format(SCRIPT_DIR, WORK_DIR, i, 360, 361)
            args = shlex.split(raw_input)
            print(args)
            p = subprocess.Popen(args)
            p.wait()

            log.log("Constructed model [{}-{}]({} images) in directory: {} ({} sec)".format(start_frame, end_frame, end_frame-start_frame, MODEL_DIR, timer_model.read()))

        log.log("Reconstruction finished ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")


    log.heading("MODEL MERGING")
    if not SKIP_MERGE:
        timer = Timer()

        # FIND RECONSTRUCTED MODELS
        dirs = os.walk("{}/sparse".format(WORK_DIR))
        model_dirs = next(dirs)[1]
        models = []
        for model_dir in model_dirs:
            # try:
            models.append(int(model_dir))
        models.sort()

        log.log("Found {} models: {}".format(len(models), models))

        if len(models) > 1:

            if not os.path.exists("{}/merged".format(WORK_DIR)):
                os.mkdir("{}/merged".format(WORK_DIR))

            model_1 = "{}/sparse/{}/0".format(WORK_DIR,models.pop(0))

            for i,m in enumerate(models):

                model_2 = "{}/sparse/{}/0".format(WORK_DIR,m)
                merge_dir = "{}/merged/{}".format(WORK_DIR,i)
                if not os.path.exists(merge_dir):
                    os.mkdir(merge_dir)

                # MERGE MODEL
                raw_input = "{}/shell/model_merge.sh {} {} {}".format(SCRIPT_DIR, model_1, model_2, merge_dir)
                args = shlex.split(raw_input)
                print(args)
                p = subprocess.Popen(args)
                p.wait()

                # BUNDLE ADJUSTMENT
                # raw_input = "{}/shell/bundle_adjuster.sh {}".format(SCRIPT_DIR, merge_dir)
                # args = shlex.split(raw_input)
                # print args
                # p = subprocess.Popen(args)
                # p.wait()

                log.log("Merged models:\n{}\n{}\ninto model:\n{}".format(model_1, model_2, merge_dir))

                model_1 = merge_dir
        else:
            log.log("{} model(s) found, at least 2 needed for merging.".format(len(models)))

        log.log("Merging finished ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")

    log.heading("MODEL CONVERSION TO PLY")
    if not SKIP_SPARSE_CONVERT:
        timer = Timer()

        # FIND MERGED MODELS
        models = []
        merge_dir = "{}/merged/0".format(WORK_DIR)
        if os.path.exists(merge_dir):
            models.append(merge_dir)
        else:
            log.log("No merged model found at\n{}".format(merge_dir))
            dirs = os.walk("{}/sparse".format(WORK_DIR))
            model_dirs = next(dirs)[1]
            models = []
            for model_dir in model_dirs:
                # try:
                models.append(int(model_dir))
            models.sort()
            models = ["{}/sparse/{}/0/".format(WORK_DIR,m) for m in models]

        log.log("Found {} models:{}".format(len(models), '\n'.join(models)))

        convert_dir = "{}/sparse_ply".format(WORK_DIR)
        if not os.path.exists(convert_dir):
            os.mkdir(convert_dir)

        for i,model in enumerate(models):
            # MERGE MODEL
            out_model = "{}/sparse_{}.ply".format(convert_dir,i) \
                if len(models) >1 else \
                "{}/sparse_merged.ply".format(convert_dir)

            raw_input = "{}/shell/model_convert.sh {} {} {}".format(SCRIPT_DIR, model, out_model, "PLY")
            args = shlex.split(raw_input)
            print(args)
            p = subprocess.Popen(args)
            p.wait()

            log.log("Converted model:\n{}\ninto model:\n{}".format(model, out_model))

        log.log("Conversion finished ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")

    log.log("\n...job finished.")
Example #10
0
def run(liczba):
    v = liczba
    wierzcholki = []
    dell = DEL_lkrawedzi(v)
    dfs = DFS_lkrawedz(v)

    for i in range(liczba):
        for z in range(liczba):
            if z > i:
                razem = str(i) + str(z)
                w1 = int(i)
                w2 = int(z)
                dell.addEdge(w1, w2)
                dfs.addEdge(w1, w2)
                wierzcholki.append(razem)

    print("Sortowanie topologiczne przez usuwanie: ")
    timer_sort_topological = Timer()
    timer_sort_topological.start()

    dell.topologicalSort_DEL()

    # print("Lista krawedzi posortowana DELL")
    ilko = 0
    for znajdz in listadell:
        for iins in wierzcholki:
            if znajdz == int(iins[0]):
                ilko = ilko + 1

    timer_sort_topological.stop()

    if not S:
        timer_sort_topological_dfs = Timer()
        timer_sort_topological_dfs.start()
        dfs.topologicalSort_DFS()

        # print("Lista krawedzi posortowana DFS")
        ilko2 = 0
        for znajdz in listadfs:
            for iins in wierzcholki:
                if znajdz == int(iins[0]):
                    ilko2 = ilko2 + 1
                    'print("(", iins[0], ",", iins[1], ")")'
        timer_sort_topological_dfs.stop()

        return {
            'sort_top_del': timer_sort_topological.get_mean_time(),
            'sort_top_dfs': timer_sort_topological_dfs.get_mean_time()
        }

    else:
        return False
Example #11
0
def test(models, epoch, f = None):
    global num_tests
    num_tests += 1
    class MStat:
        def __init__(self, model):
            model.eval()
            self.model = model
            self.correct = 0
            class Stat:
                def __init__(self, d, dnm):
                    self.domain = d
                    self.name = dnm
                    self.width = 0
                    self.max_eps = 0
                    self.safe = 0
                    self.proved = 0
                    self.time = 0
            self.domains = [ Stat(h.parseValues(domains,d), h.catStrs(d)) for d in args.test_domain ]
    model_stats = [ MStat(m) for m in models ]
        
    num_its = 0
    saved_data_target = []
    for data, target in test_loader:
        if num_its >= args.test_size:
            break

        if num_tests == 1:
            saved_data_target += list(zip(list(data), list(target)))
        
        num_its += data.size()[0]
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for m in model_stats:

            with torch.no_grad():
                pred = m.model(data).data.max(1, keepdim=True)[1] # get the index of the max log-probability
                m.correct += pred.eq(target.data.view_as(pred)).sum()

            for stat in m.domains:
                timer = Timer(shouldPrint = False)
                with timer:
                    def calcData(data, target):
                        box = stat.domain.box(data, m.model.w, model=m.model, untargeted = True, target=target)
                        with torch.no_grad():
                            bs = m.model(box)
                            org = m.model(data).max(1,keepdim=True)[1]
                            stat.width += bs.diameter().sum().item() # sum up batch loss
                            stat.proved += bs.isSafe(org).sum().item()
                            stat.safe += bs.isSafe(target).sum().item()
                            stat.max_eps += 0 # TODO: calculate max_eps

                    if m.model.net.neuronCount() < 5000 or stat.domain in SYMETRIC_DOMAINS:
                        calcData(data, target)
                    else:
                        for d,t in zip(data, target):
                            calcData(d.unsqueeze(0),t.unsqueeze(0))
                stat.time += timer.getUnitTime()
                
    l = num_its # len(test_loader.dataset)
    for m in model_stats:

        pr_corr = float(m.correct) / float(l)
        if args.use_schedule:
            m.model.lrschedule.step(1 - pr_corr)
        
        h.printBoth(('Test: {:12} trained with {:'+ str(largest_domain) +'} - Avg sec/ex {:1.12f}, Accuracy: {}/{} ({:3.1f}%)').format(
            m.model.name, m.model.ty.name,
            m.model.speed,
            m.correct, l, 100. * pr_corr), f = f)
        
        model_stat_rec = ""
        for stat in m.domains:
            pr_safe = stat.safe / l
            pr_proved = stat.proved / l
            pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
            h.printBoth(("\t{:" + str(largest_test_domain)+"} - Width: {:<36.16f} Pr[Proved]={:<1.3f}  Pr[Corr and Proved]={:<1.3f}  Pr[Corr|Proved]={:<1.3f} AvgMaxEps: {:1.10f} Time = {:<7.5f}").format(
                stat.name, 
                stat.width / l, 
                pr_proved, 
                pr_safe, pr_corr_given_proved, 
                stat.max_eps / l,
                stat.time), f = f)
            model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(stat.name, pr_proved, pr_safe, pr_corr_given_proved)
        prepedname = m.model.ty.name.replace(" ", "_").replace(",", "").replace("(", "_").replace(")", "_").replace("=", "_")
        net_file = os.path.join(out_dir, m.model.name +"__" +prepedname + "_checkpoint_"+str(epoch)+"_with_{:1.3f}".format(pr_corr))

        h.printBoth("\tSaving netfile: {}\n".format(net_file + ".net"), f = f)

        if (num_tests % args.save_freq == 1 or args.save_freq == 1) and not args.dont_write:
            torch.save(m.model.net, net_file + ".pynet")
            
            with h.mopen(args.dont_write, net_file + ".net", "w") as f2:
                m.model.net.printNet(f2)
                f2.close()
            if args.onyx:
                nn = copy.deepcopy(m.model.net)
                nn.remove_norm()
                torch.onnx.export(nn, h.zeros([1] + list(input_dims)), net_file + ".onyx", 
                                  verbose=False, input_names=["actual_input"] + ["param"+str(i) for i in range(len(list(nn.parameters())))], output_names=["output"])


    if num_tests == 1 and not args.dont_write:
        img_dir = os.path.join(out_dir, "images")
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for img_num,(img,target) in zip(range(args.number_save_images), saved_data_target[:args.number_save_images]):
            sz = ""
            for s in img.size():
                sz += str(s) + "x"
            sz = sz[:-1]

            img_file = os.path.join(img_dir, args.dataset + "_" + sz + "_"+ str(img_num))
            if img_num == 0:
                print("Saving image to: ", img_file + ".img")
            with open(img_file + ".img", "w") as imgfile:
                flatimg = img.view(h.product(img.size()))
                for t in flatimg.cpu():
                    print(decimal.Decimal(float(t)).__format__("f"), file=imgfile)
            with open(img_file + ".class" , "w") as imgfile:
                print(int(target.item()), file=imgfile)
Example #12
0
import sqlite3
import sqlalchemy as sa

from helpers import Timer

connection = sqlite3.connect('litedb')
cursor = connection.cursor()
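# Raw SQL: total invoice value per user for the account(s) with the highest invoice count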
with Timer('query'):
    cursor.execute(
        """SELECT sum(invoice_item.price) AS total, user.name AS user_name, user.surname AS user_surname
    FROM user
           JOIN account ON user.id = account.user_id
           JOIN invoice ON account.id = invoice.account_id
           JOIN invoice_item ON invoice.id = invoice_item.invoice_id
    WHERE account.id IN (SELECT A.account_id
                         FROM (SELECT count(invoice.id) AS total, invoice.account_id AS account_id
                               FROM invoice
                               GROUP BY invoice.account_id) AS A
                         WHERE A.total = (SELECT max(B.total) AS max_1
                                               FROM (SELECT count(invoice.id) AS total
                                                     FROM invoice
                                                     GROUP BY invoice.account_id) AS B))
    GROUP BY user.name, user.surname
    ORDER BY sum(invoice_item.price) DESC""")
    r2 = cursor.fetchall()
print(r2)
for row in r2:
    print(row[1], row[2], "Total value", row[0])
Example #13
0
import random
import sqlite3
import string

import sqlalchemy as sa

from helpers import Timer

connection = sqlite3.connect('litedb')
cursor = connection.cursor()
cursor.execute('DELETE FROM invoice')
cursor.execute('DELETE FROM invoice_item')
cursor.execute('DELETE FROM product')
cursor.execute('DELETE FROM product_stock')
connection.commit()
cursor.execute("SELECT id FROM account LIMIT 1")
account_id = cursor.fetchone()[0]

with Timer('raw'):
    cursor.execute('BEGIN')
    invoice_insert = f"INSERT INTO invoice (account_id, invoice_number) VALUES ({account_id}, 'test')"
    cursor.execute(invoice_insert)
    cursor.execute("SELECT last_insert_rowid()")

    invoice_id = cursor.fetchone()[0]
    products = []
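    # Build one multi-row INSERT with 10000 random product names instead of inserting row by row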
    prd_insert = "INSERT INTO product (name) VALUES"
    for _ in range(10000):
        products.append(
            f"('{random.choice(string.ascii_uppercase) + str(random.randint(1, 10000))}')"
        )
    cursor.execute(prd_insert + ','.join(products))
    cursor.execute("SELECT id FROM product")
    prd_ids = cursor.fetchall()
Example #14
0
def surprise_svd_best_params(train_path="datas/train.csv",
                             test_path="datas/test.csv",
                             verbose=True,
                             t=Timer()):
    # reader with rating scale
    reader = Reader(line_format='user item rating',
                    sep=',',
                    rating_scale=(1, 5))
    # load data from df
    data = Dataset.load_from_file(train_path, reader)

    #svd parameters
    #     n_factors = [50, 100, 200]
    #     n_epochss = np.linspace(200, 40, 9, dtype=np.int32)
    n_epochss = [200, 500, 800]
    reg_alls = np.logspace(-2, -5, 4)
    #     lr_bus = np.logspace(-10, -2, 9)
    #     lr_qis = np.logspace(-10, -2, 9)
    params = dict()
    rmses = dict()

    t.start()

    ## ------rmse: 1.0665431544988566, n_factor:50, n_epoch: 200, reg_all: 0.01, lr_bu: 1e-09, lr_qi: 1e-05------
    params['lr_bu'] = 1e-09
    params['lr_qi'] = 1e-05
    params['n_factor'] = 50
    lr_bu = 1e-09
    lr_qi = 1e-05
    n_factor = 50
    for n_epoch in n_epochss:
        params['n_epoch'] = n_epoch
        for reg_all in reg_alls:
            params['reg_all'] = reg_all
            #             for lr_bu in lr_bus:
            #                 params['lr_bu'] = lr_bu
            #                 for lr_qi in lr_qis:
            #                     params['lr_qi'] = lr_qi
            #                     for n_factor in n_factors:
            #                         params['n_factor'] = n_factor

            algo = SVD(n_factors=n_factor,
                       n_epochs=n_epoch,
                       reg_all=reg_all,
                       lr_bu=lr_bu,
                       lr_qi=lr_qi,
                       verbose=False)
            rmse = surprise_cv_algo(data, algo)
            print(
                "------Time:{}, rmse: {}, n_factor:{}, n_epoch: {}, reg_all: {}, lr_bu: {}, lr_qi: {}------\n\n"
                .format(t.now(), rmse, n_factor, n_epoch, reg_all, lr_bu,
                        lr_qi))
            rmses[rmse] = dict(params)  # copy; params is reused and mutated across iterations

    # Find the model with least RMSE
    lowest_rmse = min(rmses.keys())
    best_params = rmses[lowest_rmse]

    print("Best svd rmse: {}, n_epoch: {}, reg_all: {}, lr_bu: {}, lr_qi: {}".
          format(lowest_rmse, best_params['n_epoch'], best_params['reg_all'],
                 best_params['lr_bu'], best_params['lr_qi']))

Example #15
0
if __name__ == '__main__':
    directory = 'data'
    counties = []
    total_number_of_lines = 0
    data_file_size = 0
    for file_name in os.listdir(directory):
        if file_name.endswith(".csv"):
            path = os.path.join(directory, file_name)
            counties.append(path)
            total_number_of_lines += get_number_of_lines_in_file(path)
            data_file_size = os.path.getsize(path)

    timer = Timer(keys={
        'parts': len(counties),
        'total': total_number_of_lines
    })
    timer.output_string = \
        '\n{}/{} {} "{}"' +\
        '\nRead and shrinked {:0.1f}% of this county after {}' +\
        '\n{:0.1f}% total, calculating this prosess should finish in {}, with an average of {:0.0f} lines/second'
    timer.output_format = lambda timer: (
        timer.keys['this_part'] + 1,
        timer.keys['parts'],
        strftime("%H:%M:%S", gmtime(timer.total_time_spent)),
        timer.keys['filename'],
        timer.progress / timer.keys['this_total'] * 100,
        strftime("%H:%M:%S", gmtime(timer.now - timer.start_time)),
        timer.lines_cycled_real / timer.keys['total'] * 100,
        strftime("%H:%M:%S", gmtime(
            (timer.keys['total'] - timer.lines_cycled_real) / (
Example #16
0
def reconstruct(settings):
    timer_global = Timer()

    SKIP_UNDISTORT = settings["SKIP_UNDISTORT"]
    SKIP_STEREO = settings["SKIP_STEREO"]
    SKIP_FUSION = settings["SKIP_FUSION"]

    # VALIDATE PATHS
    SCRIPT_DIR = settings["SCRIPT_DIR"].rstrip("/")
    assert os.path.exists(
        SCRIPT_DIR), "ERROR: SCRIPT PATH MISSING AT {}".format(SCRIPT_DIR)

    WORK_DIR = settings["WORK_DIR"].rstrip("/")
    if not os.path.exists(WORK_DIR):
        os.mkdir(WORK_DIR)

    IMAGE_DIR = settings["IMAGE_DIR"]
    IMAGE_DIR = "{}/{}".format(WORK_DIR, IMAGE_DIR)
    if not os.path.exists(IMAGE_DIR):
        os.mkdir(IMAGE_DIR)

    MODEL_DIR = settings["MODEL_DIR"]
    MODEL_DIR = "{}/{}".format(WORK_DIR, MODEL_DIR)
    if not os.path.exists(MODEL_DIR):
        os.mkdir(MODEL_DIR)

    OUTPUT_DIR = settings["OUTPUT_DIR"]
    OUTPUT_DIR = "{}/{}".format(WORK_DIR, OUTPUT_DIR)
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    log = LogWriter(WORK_DIR)
    log.heading("DENSE RECONSTRUCTION")

    log.log("Undistorting images...")
    if not SKIP_UNDISTORT:
        timer = Timer()

        raw_input = "{}/shell/dense_undistort.sh {} {} {}".format(
            SCRIPT_DIR, IMAGE_DIR, MODEL_DIR, OUTPUT_DIR)
        args = shlex.split(raw_input)
        p = subprocess.Popen(args)
        p.wait()

        log.log("...complete ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")

    log.log("Stereo reconstruction...")
    if not SKIP_STEREO:
        timer = Timer()

        raw_input = "{}/shell/dense_stereo.sh {}".format(
            SCRIPT_DIR, OUTPUT_DIR)
        args = shlex.split(raw_input)
        p = subprocess.Popen(args)
        p.wait()

        log.log("...complete ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")

    log.log("Dense fusion...")
    if not SKIP_FUSION:
        timer = Timer()

        raw_input = "{}/shell/dense_fusion.sh {}".format(
            SCRIPT_DIR, OUTPUT_DIR)
        args = shlex.split(raw_input)
        p = subprocess.Popen(args)
        p.wait()

        log.log("...complete ({} sec)".format(timer.read()))
    else:
        log.log("Skipped")

    log.log("\n...job finished ({} sec).".format(timer_global.read()))
Example #17
0
def train(epoch, models):
    global total_batches_seen

    for model in models:
        model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        total_batches_seen += 1
        time = float(total_batches_seen) / len(train_loader)
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            model.global_num += data.size()[0]

            timer = Timer(
                "train a sample from " + model.name + " with " + model.ty.name,
                data.size()[0], False)
            lossy = 0
            with timer:
                for s in model.getSpec(data.to_dtype(), target, time=time):
                    model.optimizer.zero_grad()
                    loss = model.aiLoss(*s, time=time, **vargs).mean(dim=0)
                    lossy += loss.detach().item()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
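                    # NaN guard: warn on NaN parameters and replace NaN gradients with small random values before the step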
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals")
                        if p is not None and p.grad is not None and torch.isnan(
                                p.grad).any():
                            print("Such nan in postmagic")
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.grad = torch.where(
                                torch.isnan(p.grad),
                                torch.normal(mean=h.zeros(p.grad.shape),
                                             std=stdv), p.grad)

                    model.optimizer.step()

                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals after grad")
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.data = torch.where(
                                torch.isnan(p.data),
                                torch.normal(mean=h.zeros(p.data.shape),
                                             std=stdv), p.data)

                    if args.clip_norm:
                        model.clip_norm()
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            raise Exception("Such nan in vals after clip")

            model.addSpeed(timer.getUnitTime())

            if batch_idx % args.log_interval == 0:
                print((
                    'Train Epoch {:12} {:' + str(largest_domain) +
                    '}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}'
                ).format(model.name, model.ty.name, epoch,
                         batch_idx * len(data), len(train_loader.dataset),
                         100. * batch_idx / len(train_loader), model.speed,
                         lossy))
Example #18
0
def train(epoch, models, decay=True):
    global total_batches_seen

    for model in models:
        model.train()
        #if args.decay_fir:
        #    if epoch > 1 and isinstance(model.ty, goals.DList) and len(model.ty.al) == 2 and decay:
        #        for (i, a) in enumerate(model.ty.al):
        #            if i == 1:
        #                model.ty.al[i] = (a[0], Const(min(a[1].getVal() + 0.0025, 0.75)))
        #            else:
        #                model.ty.al[i] = (a[0], Const(max(a[1].getVal() - 0.0025, 0.25)))

    for batch_idx, (data, target) in enumerate(train_loader):
        if total_batches_seen * args.batch_size % 4000 == 0:
            for model in models:
                if args.decay_fir:
                    if isinstance(model.ty, goals.DList) and len(
                            model.ty.al) == 2 and decay:
                        for (i, a) in enumerate(model.ty.al):
                            if i == 1:
                                model.ty.al[i] = (a[0],
                                                  Const(
                                                      min(
                                                          a[1].getVal() +
                                                          0.0025, 3)))
                            # else:
                            #    model.ty.al[i] = (a[0], Const(max(a[1].getVal() - 0.00075, 0.25)))

        total_batches_seen += 1
        time = float(total_batches_seen) / len(train_loader)
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            model.global_num += data.size()[0]

            timer = Timer(
                "train a sample from " + model.name + " with " + model.ty.name,
                data.size()[0], False)
            lossy = 0
            with timer:
                for s in model.getSpec(data.to_dtype(), target, time=time):
                    model.optimizer.zero_grad()
                    loss = model.aiLoss(*s, time=time, **vargs).mean(dim=0)
                    lossy += loss.detach().item()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
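                    # NaN guard: warn on NaN parameters and replace NaN gradients with small random values before the step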
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals")
                        if p is not None and p.grad is not None and torch.isnan(
                                p.grad).any():
                            print("Such nan in postmagic")
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.grad = torch.where(
                                torch.isnan(p.grad),
                                torch.normal(mean=h.zeros(p.grad.shape),
                                             std=stdv), p.grad)

                    model.optimizer.step()

                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals after grad")
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.data = torch.where(
                                torch.isnan(p.data),
                                torch.normal(mean=h.zeros(p.data.shape),
                                             std=stdv), p.data)

                    if args.clip_norm:
                        model.clip_norm()
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            raise Exception("Such nan in vals after clip")

            model.addSpeed(timer.getUnitTime())

            if batch_idx % args.log_interval == 0:
                print((
                    'Train Epoch {:12} {:' + str(largest_domain) +
                    '}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}'
                ).format(model.name, model.ty.name, epoch,
                         batch_idx * len(data), len(train_loader.dataset),
                         100. * batch_idx / len(train_loader), model.speed,
                         lossy))

    val = 0
    val_origin = 0
    batch_cnt = 0
    for batch_idx, (data, target) in enumerate(val_loader):
        batch_cnt += 1
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            for s in model.getSpec(data.to_dtype(), target):
                loss = model.aiLoss(*s, **vargs).mean(dim=0)
                val += loss.detach().item()

            loss = model.aiLoss(data, target, **vargs).mean(dim=0)
            val_origin += loss.detach().item()

    return val_origin / batch_cnt, val / batch_cnt
Example #19
0
def next_genn_targeted(cells):
    # Only consider cells next to live cells.
    possible = set(nc for c in cells for nc in neighborsn(c))
    ncells = set()
    for pc in possible:
        ncount = sum(1 for nc in neighborsn(pc) if nc in cells)
        if pc in cells:
            if ncount in (2, 3):
                ncells.add(pc)
        else:
            if ncount == 3:
                ncells.add(pc)
    return ncells

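# Advance the automaton six generations and count the live cells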
def partn(cells):
    cells6 = nth(iterate(next_genn_targeted, cells), 6)
    return len(cells6)

def test_part1n():
    assert partn(ncells_from_text(TEST, 3)) == 112

def test_part2n():
    assert partn(ncells_from_text(TEST, 4)) == 848

if __name__ == '__main__':
    print(f"Part 1: {partn(ncells_from_text(INPUT, 3))}")
    with Timer() as timer:
        ans = partn(ncells_from_text(INPUT, 4))
    print(f"Part 2: {ans}, {timer.duration:.1f}s")
Example #20
0
import random
import string

import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker

from helpers import Timer
from models import Account, Invoice, Product, InvoiceItem, ProductStock

engine = sa.engine.create_engine('sqlite:///litedb', echo=False)
session = sessionmaker(bind=engine)()

session.query(Invoice).delete()
session.query(InvoiceItem).delete()
session.query(Product).delete()
session.query(ProductStock).delete()
session.commit()

account = session.query(Account).first()
with Timer('Insert big invoice'):
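    # One invoice with 10000 generated products/items, built via the ORM and committed once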
    invoice = Invoice(account=account, invoice_number='test')

    for _ in range(10000):
        product = Product(name=random.choice(string.ascii_uppercase) +
                          str(random.randint(1, 10000)))

        invoice_item = InvoiceItem(invoice=invoice,
                                   product=product,
                                   quantity=1,
                                   price=1)

    session.add(invoice)
    session.commit()
Example #21
0
def run(liczba):

    v = liczba
    wierzcholki = []
    b = [[0] * v for _ in range(v)]
    dell = DEL_lkrawedzi(v)
    dfs = DFS_lkrawedz(v)
    for i in range(liczba):
        for z in range(liczba):
            if z>i:
                razem = str(i)+str(z)
                w1 = int(i)
                w2 = int(z)
                dell.addEdge(w1,w2)
                dfs.addEdge(w1,w2)
                b[w1][w2] = 1
                wierzcholki.append(razem)


    print("Sortowanie topologiczne przez usuwanie: ")

    timer_sort_topological_del = Timer()
    timer_sort_topological_del.start()

    dell.topologicalSort_DEL()

    #print("Macierz sasiedztwa posortowana DELL:")

    for kl in listadell:
        # print(kl, " - ", b[kl])
        pass

    timer_sort_topological_del.stop()

    if S == False:

        print("\n")
        print("Sortowanie topologiczne metoda DFS: ")

        timer_sort_topological_dfs = Timer()
        timer_sort_topological_dfs.start()

        #print("Macierz sasiedztwa posortowana DFS:")
        dfs.topologicalSort_DFS()
        timer_sort_topological_dfs.stop()

        return {
            'sort_top_del': timer_sort_topological_del.get_mean_time(),
            'sort_top_dfs': timer_sort_topological_dfs.get_mean_time()
        }

    else:
        return False