Example #1
 def get_reduced(x, all_possible_sub):
     num_e = h.product(x.size())
     view_num = all_possible_sub * h.product(self.in_shape)
     if num_e >= view_num and num_e % view_num == 0:  # convert to Box (HybridZonotope)
         lower = x.min(1)[0]
         upper = x.max(1)[0]
         return ai.HybridZonotope((lower + upper) / 2,
                                  (upper - lower) / 2, None)
     else:  # if it is a Point()
         assert False
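All of these snippets lean on an `h.product` helper to count tensor elements (e.g. `h.product(x.size())` above). The library's own definition is not shown here; a minimal stand-in, assuming it simply multiplies the elements of an iterable:

from functools import reduce
import operator

def product(iterable):
    # Multiply all elements together; an empty iterable yields 1.
    return reduce(operator.mul, iterable, 1)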
Example #2
    def init(self, prev, out_shape, **kargs):
        self.in_neurons = h.product(prev)
        if isinstance(out_shape, int):
            out_shape = [out_shape]
        self.out_neurons = h.product(out_shape)

        self.weight = torch.nn.Parameter(torch.Tensor(self.in_neurons, self.out_neurons))
        self.bias = torch.nn.Parameter(torch.Tensor(self.out_neurons))

        return out_shape
Example #3
    def correlate(self, cc_indx_batch_beta):
        # cc_indx_batch_beta indexes into the flattened per-example tensor.
        num_correlate = h.product(cc_indx_batch_beta.shape[1:])

        beta = h.zeros(self.head.shape).to_dtype() if self.beta is None else self.beta
        errors = (h.zeros([0] + list(self.head.shape)).to_dtype()
                  if self.errors is None else self.errors)

        batch_size = beta.shape[0]
        new_errors = h.zeros([num_correlate] + list(self.head.shape)).to_dtype()

        inds_i = torch.arange(batch_size, device=h.device).unsqueeze(1).long()
        nc = torch.arange(num_correlate, device=h.device).unsqueeze(1).long()

        # Bring the batch dimension first and flatten, so the selected
        # coordinates can be written with one advanced-indexing assignment.
        new_errors = (new_errors.permute(1, 0, *range(2, len(new_errors.shape)))
                      .contiguous().view(batch_size, num_correlate, -1))
        new_errors[inds_i,
                   nc.unsqueeze(0).expand([batch_size] + list(nc.shape)).squeeze(2),
                   cc_indx_batch_beta] = beta.view(batch_size, -1)[inds_i, cc_indx_batch_beta]

        # Restore the (num_errors, batch, *shape) layout and append the new rows.
        new_errors = (new_errors.permute(1, 0, *range(2, len(new_errors.shape)))
                      .contiguous().view(num_correlate, batch_size, *beta.shape[1:]))
        errors = torch.cat((errors, new_errors), dim=0)

        # The correlated coordinates no longer contribute uncorrelated box width.
        beta.view(batch_size, -1)[inds_i, cc_indx_batch_beta] = 0

        return self.new(self.head, beta, errors)
Example #4
def part2(bus_ids):
    # Chinese remainder theorem: find the time t with t ≡ -offset (mod bus)
    # for every listed bus.
    base_mods = [(b, m) for m, b in enumerate(bus_ids) if b is not None]
    bases = [b for b, _ in base_mods]
    p = product(bases)
    x = 0
    for b, m in base_mods:
        pp = p // b
        x += modular_inverse(pp, b) * pp * (b - m)
    return x % lcm(bases)
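The helpers `product`, `modular_inverse`, and `lcm` are not shown above; the stand-ins below are assumptions, and the sample schedule is the well-known Advent of Code 2020 day 13 example:

from functools import reduce
from math import gcd

product = lambda xs: reduce(lambda a, b: a * b, xs, 1)        # stand-in helper
lcm = lambda xs: reduce(lambda a, b: a * b // gcd(a, b), xs)  # stand-in helper
modular_inverse = lambda a, m: pow(a, -1, m)                  # Python 3.8+

print(part2([7, 13, None, None, 59, None, 31, 19]))  # expected: 1068781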
Example #5
    def boxBetween(self, o1, o2, *args, **kargs):
        batches = o1.size()[0]
        num_elem = h.product(o1.size()[1:])
        ei = h.getEi(batches, num_elem)

        if len(o1.size()) > 2:
            ei = ei.contiguous().view(num_elem, *o1.size())

        return self.domain((o1 + o2) / 2, None, ei * (o2 - o1).abs() / 2).checkSizes()
Example #6
 def line(o1, o2, w=None):
     ln = ((o2 - o1) / 2).unsqueeze(0)
     if w is not None and w > 0.0:
         batches = o1.size()[0]
         num_elem = h.product(o1.size()[1:])
         ei = h.getEi(batches, num_elem)
         if len(o1.size()) > 2:
             ei = ei.contiguous().view(num_elem, *o1.size())
         ln = torch.cat([ln, ei * w])
     return HBox((o1 + o2) / 2, None, ln).checkSizes()
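Several of these examples (`line`, `box`, `boxBetween`, `applySuper`) build their error terms from `h.getEi`, whose definition is not shown. A plausible sketch inferred from the call sites, not the library's actual code: one one-hot error direction per input coordinate, broadcast over the batch.

import torch

def getEi(batches, num_elem):
    # Shape (num_elem, batches, num_elem): error term k is the one-hot
    # direction e_k repeated for every example, so `ei * radius` describes
    # an axis-aligned box in zonotope form.
    return torch.eye(num_elem).unsqueeze(1).expand(num_elem, batches, num_elem)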
Example #7
    def applySuper(self, ret):
        batches = ret.head.size()[0]
        num_elem = h.product(ret.head.size()[1:])
        ei = h.getEi(batches, num_elem)

        if len(ret.head.size()) > 2:
            ei = ei.contiguous().view(num_elem, *ret.head.size())

        ret.errors = torch.cat([ret.errors, ei * ret.beta]) if ret.beta is not None else ret.errors
        ret.beta = None
        return ret.checkSizes()
Example #8
    def line(self, o1, o2, **kargs):
        w = self.w.getVal(c=0, **kargs)

        ln = ((o2 - o1) / 2).unsqueeze(0)
        if w is not None and w > 0.0:
            batches = o1.size()[0]
            num_elem = h.product(o1.size()[1:])
            ei = h.getEi(batches, num_elem)
            if len(o1.size()) > 2:
                ei = ei.contiguous().view(num_elem, *o1.size())
            ln = torch.cat([ln, ei * w])
        return self.domain((o1 + o2) / 2, None, ln).checkSizes()
Example #9
    def box(original, radius):
        """
        This version is slow, but it keeps correlation down the line.
        """
        batches = original.size()[0]
        num_elem = h.product(original.size()[1:])
        ei = h.getEi(batches, num_elem)

        if len(original.size()) > 2:
            ei = ei.contiguous().view(num_elem, *original.size())

        return HBox(original, None, ei * radius).checkSizes()
Example #10
    def correlateMaxK(self, num_correlate):
        if num_correlate == 0:
            return self

        domshape = self.head.shape
        batch_size = domshape[0]
        num_pixs = h.product(domshape[1:])
        num_correlate = min(num_correlate, num_pixs)

        concrete_max_image = self.ub().view(batch_size, -1)

        # Correlate the coordinates with the largest concrete upper bounds.
        cc_indx_batch_beta = concrete_max_image.topk(num_correlate)[1]
        return self.correlate(cc_indx_batch_beta)
Example #11
    def box(self, original, w, **kargs):
        """
        This version of it is slow, but keeps correlation down the line.
        """
        radius = self.w.getVal(c = w, **kargs)

        batches = original.size()[0]
        num_elem = h.product(original.size()[1:])
        ei = h.getEi(batches,num_elem)
        
        if len(original.size()) > 2:
            ei = ei.contiguous().view(num_elem, *original.size())

        return self.domain(original, None, ei * radius).checkSizes()
Example #12
    def stochasticCorrelate(self, num_correlate, choices=None):
        if num_correlate == 0:
            return self

        domshape = self.head.shape
        batch_size = domshape[0]
        num_pixs = h.product(domshape[1:])
        num_correlate = min(num_correlate, num_pixs)
        ucc_mask = h.ones([batch_size, num_pixs]).long()

        # Sample num_correlate distinct coordinates uniformly per example.
        cc_indx_batch_beta = h.cudify(
            torch.multinomial(
                ucc_mask.to_dtype(), num_correlate,
                replacement=False)) if choices is None else choices
        return self.correlate(cc_indx_batch_beta)
Example #13
def first_solution():
    # Build the millionth lexicographic permutation digit by digit,
    # counting with factorials (the factorial number system).
    string = ''
    possibilities_left = 10**6 - 1
    numbers_left = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    while True:
        to_subtract = 0
        for x in range(1, len(numbers_left) + 1):
            possibilities_with_nums = product(list(range(1, x + 1)))
            if possibilities_with_nums > possibilities_left:
                to_subtract = product(list(range(1, x)))
                break
            elif possibilities_with_nums == possibilities_left:
                to_subtract = product(list(range(1, x + 1)))
                break
        fits = possibilities_left // to_subtract
        number = numbers_left.pop(fits)
        string += str(number)
        possibilities_left -= fits * to_subtract
        if possibilities_left == 0:
            break
    string += ''.join([str(num) for num in numbers_left])
    return string
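This is Project Euler problem 24 (the millionth lexicographic permutation of the digits 0-9). Assuming `product` multiplies the elements of a list, a quick check:

from math import prod as product  # stand-in for the helper assumed above

assert first_solution() == '2783915460'  # known Project Euler 24 answer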
Example #14
    def abstract_forward(self, x):
        sz = x.size()
        """
        # for more control in the future
        indxs_1 = torch.arange(start = 0, end = sz[1], step = math.ceil(sz[1] / self.dims[1]) )
        indxs_2 = torch.arange(start = 0, end = sz[2], step = math.ceil(sz[2] / self.dims[2]) )
        indxs_3 = torch.arange(start = 0, end = sz[3], step = math.ceil(sz[3] / self.dims[3]) )

        indxs = torch.stack(torch.meshgrid((indxs_1,indxs_2,indxs_3)), dim=3).view(-1,3)
        """
        szm = h.product(sz[1:])
        indxs = torch.arange(start=0, end=szm, step=math.ceil(szm / self.k))
        indxs = indxs.unsqueeze(0).expand(sz[0], indxs.size()[0])

        return x.abstractApplyLeaf("correlate", indxs)
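Concretely, the `arange` picks roughly `self.k` evenly spaced coordinates of the flattened tensor to correlate. A tiny check with made-up sizes:

import math
import torch

szm, k = 12, 4
indxs = torch.arange(start=0, end=szm, step=math.ceil(szm / k))
print(indxs)  # tensor([0, 3, 6, 9])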
Example #15
    def correlateMaxPool(self,
                         *args,
                         max_type=MaxTypes.ub,
                         max_pool=F.max_pool2d,
                         **kargs):
        domshape = self.head.shape
        batch_size = domshape[0]
        num_pixs = h.product(domshape[1:])

        concrete_max_image = max_type(self)

        cc_indx_batch_beta = max_pool(concrete_max_image,
                                      *args,
                                      return_indices=True,
                                      **kargs)[1].view(batch_size, -1)

        return self.correlate(cc_indx_batch_beta)
Example #16
    def reset_parameters(self):
        if not hasattr(self, 'weight') or self.weight is None:
            return
        n = h.product(self.weight.size()) / self.outShape[0]
        stdv = 1. / math.sqrt(n)
        if self.normal:
            #stdv *= math.sqrt(2)
            self.weight.data.normal_(0, stdv)
            self.weight.data.clamp_(-1, 1)
        else:
            self.weight.data.uniform_(-stdv, stdv)

        if self.bias is not None:
            if self.normal:
                self.bias.data.normal_(0, stdv)
                self.bias.data.clamp_(-1, 1)
            else:
                self.bias.data.uniform_(-stdv, stdv)
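The scale here is the usual 1/sqrt(fan_in) rule (fan-in = weights per output unit), the same family as PyTorch's default `Linear`/`Conv2d` init. A small sketch with hypothetical shapes:

import math
import torch

weight = torch.empty(16, 3, 5, 5)          # (out_chan, in_chan, kh, kw), hypothetical
fan_in = weight.numel() / weight.shape[0]  # 3 * 5 * 5 = 75
stdv = 1. / math.sqrt(fan_in)
weight.uniform_(-stdv, stdv)               # the uniform branch above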
Example #17
    def hybrid_to_zono(self, *args, correlate=True, customRelu=None, **kargs):
        beta = self.beta
        errors = self.errors
        if correlate and beta is not None:
            batches = beta.shape[0]
            num_elem = h.product(beta.shape[1:])
            ei = h.getEi(batches, num_elem)

            if len(beta.shape) > 2:
                ei = ei.contiguous().view(num_elem, *beta.shape)
            err = ei * beta
            errors = torch.cat(
                (err, errors), dim=0) if errors is not None else err
            beta = None

        return Zonotope(
            self.head,
            beta,
            errors if errors is not None else (self.beta * 0).unsqueeze(0),
            customRelu=self.customRelu if customRelu is None else customRelu)
Example #18
def factorise(n):
    if not isinstance(n, int) or n < 1:
        raise ValueError("Number must be a POSITIVE INTEGER")

    print("Factorizing {} ({} digits)...".format(n, len(str(n))))

    if n == 1:
        return []

    if is_probable_prime(n):
        return [n]

    factors, rem = find_small_primes(n, 10000)

    if factors:
        print("Prime factors found so far:")
        # Deduplicate while preserving order for display.
        factors_temp = []
        for f in factors:
            if f not in factors_temp:
                factors_temp.append(f)
        print(*factors_temp, sep=', ')
    else:
        print("No small factors found!")

    if rem != 1:
        digits = len(str(rem))
        if digits > 30:
            print("Attempting Quick Pollard's rho (Brent's variation) to " + \
                  "find slightly larger factors...")
            rem = pollard_brent_iterator(rem, factors)
        if rem > 1:
            for f in find_all_prime_factors(rem):
                factors.append(f)

    factors.sort()
    assert helpers.product(factors) == n
    for p in factors:
        assert is_probable_prime(p)
    return factors
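Assuming the helpers it calls (`is_probable_prime`, `find_small_primes`, `pollard_brent_iterator`, `find_all_prime_factors`, `helpers.product`) are available, a quick check against a known factorization:

assert factorise(600851475143) == [71, 839, 1471, 6857]  # Project Euler 3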
Example #19
    def reset_parameters(self):
        if not hasattr(self, 'weight') or self.weight is None:
            return
        n = h.product(self.weight.size()) / self.outShape[0]
        stdv = 1 / math.sqrt(n)

        if self.ibp_init:
            torch.nn.init.orthogonal_(self.weight.data)
        elif self.normal:
            self.weight.data.normal_(0, stdv)
            self.weight.data.clamp_(-1, 1)
        else:
            self.weight.data.uniform_(-stdv, stdv)

        if self.bias is not None:
            if self.ibp_init:
                self.bias.data.zero_()
            elif self.normal:
                self.bias.data.normal_(0, stdv)
                self.bias.data.clamp_(-1, 1)
            else:
                self.bias.data.uniform_(-stdv, stdv)
Example #20
 def init(self, in_shape, out_shape, **kargs):
     assert h.product(in_shape) == h.product(out_shape)
     return out_shape
Example #21
    def init(self, in_shape, w, **kargs):
        self.w = w
        self.outChan = int(h.product(in_shape) / (w * w))

        return (self.outChan, self.w, self.w)
Example #22
 def forward(self, x, **kargs):
     s = x.size()
     return x.view(s[0], h.product(s[1:]))
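In plain PyTorch terms this layer is just a batch-preserving flatten; a sketch with hypothetical shapes:

import torch

x = torch.randn(8, 3, 4, 4)
flat = x.view(x.size(0), x.size()[1:].numel())  # h.product(s[1:]) == 48
print(flat.shape)  # torch.Size([8, 48])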
Example #23
 def init(self, in_shape, **kargs):
     return h.product(in_shape)
Example #24
 def neuronCount(self):
     return h.product(self.outShape)
Example #25
nets = []

for n in args.net:
    m = getattr(models, n)
    net_create = (
        lambda m: lambda: buildNet(m)
    )(m)  # why doesn't python do scoping right?  This is a thunk.  It is bad.
    net_create.__name__ = n
    net = buildNet(m)
    net.__name__ = n
    nets += [(net, net_create)]

    print("Name: ", net_create.__name__)
    print("Number of Neurons (relus): ", net.neuronCount())
    print("Number of Parameters: ",
          sum([h.product(s.size()) for s in net.parameters()]))
    print("Depth (relu layers): ", net.depth())
    print()
    net.showNet()
    print()

if args.domain == []:
    models = [createModel(net, goals.Box(args.width), "Box") for net in nets]
else:
    models = h.flat([[
        createModel(net, h.parseValues(d, goals, scheduling), h.catStrs(d))
        for net in nets
    ] for d in args.domain])

patience = 30
last_best_origin = 0
Example #26
 def forward(self, x, **kargs):
     s = x.size()
     x = x.view(s[0], h.product(s[1:]))
     return (x.matmul(self.weight) + self.bias).view(s[0], *self.outShape)
Example #27
def test(models, epoch, f=None):
    global num_tests
    num_tests += 1

    class MStat:
        def __init__(self, model):
            model.eval()
            self.model = model
            self.correct = 0

            class Stat:
                def __init__(self, d, dnm):
                    self.domain = d
                    self.name = dnm
                    self.width = 0
                    self.max_eps = None
                    self.safe = 0
                    self.proved = 0
                    self.time = 0

            self.domains = [
                Stat(h.parseValues(d, goals), h.catStrs(d))
                for d in args.test_domain
            ]

    model_stats = [MStat(m) for m in models]
    dict_map = dict(np.load("./dataset/AG/dict_map.npy").item())
    lines = open("./dataset/en.key1").readlines()
    adjacent_keys = [[] for i in range(len(dict_map))]
    for line in lines:
        tmp = line.strip().split()
        ret = set(tmp[1:]).intersection(dict_map.keys())
        ids = []
        for x in ret:
            ids.append(dict_map[x])
        adjacent_keys[dict_map[tmp[0]]].extend(ids)

    num_its = 0
    saved_data_target = []
    for data, target in test_loader:
        if num_its >= args.test_size:
            break

        if num_tests == 1:
            saved_data_target += list(zip(list(data), list(target)))

        num_its += data.size()[0]
        if num_its % 100 == 0:
            print(num_its, model_stats[0].domains[0].safe * 100.0 / num_its)
        if args.test_swap_delta > 0:
            length = data.size()[1]
            data = data.repeat(1, length)
            for i in data:
                for j in range(length - 1):
                    for _ in range(args.test_swap_delta):
                        t = np.random.randint(0, length)
                        while len(adjacent_keys[int(i[t])]) == 0:
                            t = np.random.randint(0, length)
                        cid = int(i[t])
                        i[j * length + t] = adjacent_keys[cid][0]
            target = (target.view(-1, 1).repeat(1, length)).view(-1)
            data = data.view(-1, length)

        if h.use_cuda:
            data, target = data.cuda().to_dtype(), target.cuda()

        for m in model_stats:

            with torch.no_grad():
                pred = m.model(data).vanillaTensorPart().max(1, keepdim=True)[
                    1]  # get the index of the max log-probability
                m.correct += pred.eq(target.data.view_as(pred)).sum()

            for stat in m.domains:
                timer = Timer(shouldPrint=False)
                with timer:

                    def calcData(data, target):
                        box = stat.domain.box(data,
                                              w=m.model.w,
                                              model=m.model,
                                              untargeted=True,
                                              target=target).to_dtype()
                        with torch.no_grad():
                            bs = m.model(box)
                            org = m.model(data).vanillaTensorPart().max(
                                1, keepdim=True)[1]
                            stat.width += bs.diameter().sum().item()  # accumulate batch width
                            stat.proved += bs.isSafe(org).sum().item()
                            stat.safe += bs.isSafe(target).sum().item()
                            # stat.max_eps += 0 # TODO: calculate max_eps

                    if m.model.net.neuronCount() < 5000 or stat.domain in SYMETRIC_DOMAINS:
                        calcData(data, target)
                    else:
                        if args.test_swap_delta > 0:
                            length = data.size()[1]
                            pre_stat = copy.deepcopy(stat)
                            for i, (d, t) in enumerate(zip(data, target)):
                                calcData(d.unsqueeze(0), t.unsqueeze(0))
                                if (i + 1) % length == 0:
                                    d_proved = (stat.proved -
                                                pre_stat.proved) // length
                                    d_safe = (stat.safe -
                                              pre_stat.safe) // length
                                    d_width = (stat.width -
                                               pre_stat.width) / length
                                    stat.proved = pre_stat.proved + d_proved
                                    stat.safe = pre_stat.safe + d_safe
                                    stat.width = pre_stat.width + d_width
                                    pre_stat = copy.deepcopy(stat)
                        else:
                            for d, t in zip(data, target):
                                calcData(d.unsqueeze(0), t.unsqueeze(0))
                stat.time += timer.getUnitTime()

    l = num_its  # len(test_loader.dataset)
    for m in model_stats:
        if args.lr_multistep:
            m.model.lrschedule.step()

        pr_corr = float(m.correct) / float(l)
        if args.use_schedule:
            m.model.lrschedule.step(1 - pr_corr)

        h.printBoth(
            ('Test: {:12} trained with {:' + str(largest_domain) +
             '} - Avg sec/ex {:1.12f}, Accuracy: {}/{} ({:3.1f}%)').format(
                 m.model.name, m.model.ty.name, m.model.speed, m.correct, l,
                 100. * pr_corr),
            f=f)

        model_stat_rec = ""
        for stat in m.domains:
            pr_safe = stat.safe / l
            pr_proved = stat.proved / l
            pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
            h.printBoth((
                "\t{:" + str(largest_test_domain) +
                "} - Width: {:<36.16f} Pr[Proved]={:<1.3f}  Pr[Corr and Proved]={:<1.3f}  Pr[Corr|Proved]={:<1.3f} {}Time = {:<7.5f}"
            ).format(
                stat.name, stat.width / l, pr_proved, pr_safe,
                pr_corr_given_proved,
                "AvgMaxEps: {:1.10f} ".format(stat.max_eps / l)
                if stat.max_eps is not None else "", stat.time),
                        f=f)
            model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(
                stat.name, pr_proved, pr_safe, pr_corr_given_proved)
        prepedname = m.model.ty.name.replace(" ", "_").replace(
            ",", "").replace("(", "_").replace(")", "_").replace("=", "_")
        net_file = os.path.join(
            out_dir, m.model.name + "__" + prepedname + "_checkpoint_" +
            str(epoch) + "_with_{:1.3f}".format(pr_corr))

        h.printBoth("\tSaving netfile: {}\n".format(net_file + ".pynet"), f=f)

        if (num_tests % args.save_freq == 1 or args.save_freq
                == 1) and not args.dont_write and (num_tests > 1
                                                   or args.write_first):
            print("Actually Saving")
            torch.save(m.model.net, net_file + ".pynet")
            if args.save_dot_net:
                with h.mopen(args.dont_write, net_file + ".net", "w") as f2:
                    m.model.net.printNet(f2)
                    f2.close()
            if args.onyx:
                nn = copy.deepcopy(m.model.net)
                nn.remove_norm()
                torch.onnx.export(
                    nn,
                    h.zeros([1] + list(input_dims)),
                    net_file + ".onyx",
                    verbose=False,
                    input_names=["actual_input"] + [
                        "param" + str(i)
                        for i in range(len(list(nn.parameters())))
                    ],
                    output_names=["output"])

    if num_tests == 1 and not args.dont_write:
        img_dir = os.path.join(out_dir, "images")
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for img_num, (img, target) in zip(
                range(args.number_save_images),
                saved_data_target[:args.number_save_images]):
            sz = ""
            for s in img.size():
                sz += str(s) + "x"
            sz = sz[:-1]

            img_file = os.path.join(
                img_dir, args.dataset + "_" + sz + "_" + str(img_num))
            if img_num == 0:
                print("Saving image to: ", img_file + ".img")
            with open(img_file + ".img", "w") as imgfile:
                flatimg = img.view(h.product(img.size()))
                for t in flatimg.cpu():
                    print(decimal.Decimal(float(t)).__format__("f"),
                          file=imgfile)
            with open(img_file + ".class", "w") as imgfile:
                print(int(target.item()), file=imgfile)
Example #28
 def init(self, prev, **kargs):
     return h.product(prev)
Example #29
def train(epoch, models, decay=True):
    global total_batches_seen

    for model in models:
        model.train()
        #if args.decay_fir:
        #    if epoch > 1 and isinstance(model.ty, goals.DList) and len(model.ty.al) == 2 and decay:
        #        for (i, a) in enumerate(model.ty.al):
        #            if i == 1:
        #                model.ty.al[i] = (a[0], Const(min(a[1].getVal() + 0.0025, 0.75)))
        #            else:
        #                model.ty.al[i] = (a[0], Const(max(a[1].getVal() - 0.0025, 0.25)))

    for batch_idx, (data, target) in enumerate(train_loader):
        if total_batches_seen * args.batch_size % 4000 == 0:
            for model in models:
                if args.decay_fir:
                    if isinstance(model.ty, goals.DList) and len(model.ty.al) == 2 and decay:
                        for (i, a) in enumerate(model.ty.al):
                            if i == 1:
                                model.ty.al[i] = (a[0], Const(min(a[1].getVal() + 0.0025, 3)))
                            # else:
                            #    model.ty.al[i] = (a[0], Const(max(a[1].getVal() - 0.00075, 0.25)))

        total_batches_seen += 1
        time = float(total_batches_seen) / len(train_loader)
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            model.global_num += data.size()[0]

            timer = Timer(
                "train a sample from " + model.name + " with " + model.ty.name,
                data.size()[0], False)
            lossy = 0
            with timer:
                for s in model.getSpec(data.to_dtype(), target, time=time):
                    model.optimizer.zero_grad()
                    loss = model.aiLoss(*s, time=time, **vargs).mean(dim=0)
                    lossy += loss.detach().item()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals")
                        if p is not None and p.grad is not None and torch.isnan(
                                p.grad).any():
                            print("Such nan in postmagic")
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.grad = torch.where(
                                torch.isnan(p.grad),
                                torch.normal(mean=h.zeros(p.grad.shape),
                                             std=stdv), p.grad)

                    model.optimizer.step()

                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            print("Such nan in vals after grad")
                            stdv = 1 / math.sqrt(h.product(p.data.shape))
                            p.data = torch.where(
                                torch.isnan(p.data),
                                torch.normal(mean=h.zeros(p.data.shape),
                                             std=stdv), p.data)

                    if args.clip_norm:
                        model.clip_norm()
                    for p in model.parameters():
                        if p is not None and torch.isnan(p).any():
                            raise Exception("Such nan in vals after clip")

            model.addSpeed(timer.getUnitTime())

            if batch_idx % args.log_interval == 0:
                print((
                    'Train Epoch {:12} {:' + str(largest_domain) +
                    '}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}'
                ).format(model.name, model.ty.name, epoch,
                         batch_idx * len(data), len(train_loader.dataset),
                         100. * batch_idx / len(train_loader), model.speed,
                         lossy))

    val = 0
    val_origin = 0
    batch_cnt = 0
    for batch_idx, (data, target) in enumerate(val_loader):
        batch_cnt += 1
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()

        for model in models:
            for s in model.getSpec(data.to_dtype(), target):
                loss = model.aiLoss(*s, **vargs).mean(dim=0)
                val += loss.detach().item()

            loss = model.aiLoss(data, target, **vargs).mean(dim=0)
            val_origin += loss.detach().item()

    return val_origin / batch_cnt, val / batch_cnt
Example #30
 def forward(self, x, **kargs):
     if h.product(x.size()[2:]) == 1:  # spatial dims already 1x1; nothing to pool
         return x
     return x.avg_pool2d(kernel_size=self.kernel_size,
                         stride=self.stride,
                         padding=1)