def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return (clean top-1 acc, adversarial top-1 acc), both as fractions in [0, 1].

    Adversarial examples are generated from the current batch via
    ``self._gen_adv`` and re-forwarded through ``cand_net``.
    """
    adv_inputs = self._gen_adv(inputs, outputs, targets, cand_net)
    adv_outputs = cand_net(adv_inputs)
    clean_acc = float(accuracy(outputs, targets)[0]) / 100
    adv_acc = float(accuracy(adv_outputs, targets)[0]) / 100
    return (clean_acc, adv_acc)
def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return (clean top-1 acc, adversarial top-1 acc), both as fractions in [0, 1].

    Side effects: accumulates a running adversarial-accuracy statistic
    across calls in ``self._adv_correct`` / ``self._adv_total`` and prints
    the running average to stdout (flushed immediately so it shows up in
    redirected logs).
    """
    inputs_adv = self._gen_adv(inputs, outputs, targets, cand_net)
    outputs_adv = cand_net(inputs_adv)
    # Fix: the original computed accuracy(outputs_adv, targets) twice
    # (once for the running statistic, once for the return value);
    # compute it once and reuse. Same operation order, identical result.
    adv_acc = float(accuracy(outputs_adv, targets)[0]) / 100
    self._adv_total += len(inputs_adv)
    self._adv_correct += adv_acc * len(inputs_adv)
    print("Acc: {}".format(self._adv_correct / self._adv_total))
    sys.stdout.flush()
    return (
        float(accuracy(outputs, targets)[0]) / 100,
        adv_acc,
    )
def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return (clean top-1 acc, adversarial top-1 acc, flops).

    The flops statistic is gathered by forward hooks during the
    adversarial forward pass, so the counter is reset right before it.
    """
    adv_inputs = self._gen_adv(inputs, outputs, targets, cand_net)
    if hasattr(cand_net, "super_net"):
        # clear the flops statistics
        cand_net.super_net.reset_flops()
    # the forward hooks will calculate the flops statistics
    adv_outputs = cand_net(adv_inputs)
    # resolve where the flops counter lives; checks are performed in the
    # same order as the original nested conditional expression
    if isinstance(cand_net, nn.DataParallel):
        flops = cand_net.module.total_flops
    elif hasattr(cand_net, "super_net"):
        flops = cand_net.super_net.total_flops
    elif isinstance(cand_net, DistributedDataParallel):
        flops = cand_net.module.total_flops
    else:
        flops = cand_net.total_flops
    return (
        float(accuracy(outputs, targets)[0]) / 100,
        float(accuracy(adv_outputs, targets)[0]) / 100,
        flops,
    )
def get_reward(self, inputs, outputs, targets, cand_net):
    """Return top-1 accuracy, optionally combined with a latency reward.

    When ``self.lamb`` is set, each layer's latency is looked up from
    ``self.latency_lut`` by the primitive chosen in the genotype, and the
    reward becomes ``acc + lamb * (1/latency - 1/max_lat)``.
    """
    acc = float(accuracy(outputs, targets)[0]) / 100
    if self.lamb is None:
        return acc
    space = self.search_space
    latency_penalty = 0.
    for i_layer, geno in enumerate(cand_net.genotypes):
        chosen_prim = geno[0][0]
        layer_prims = space.cell_shared_primitives[space.cell_layout[i_layer]]
        latency_penalty += float(
            self.latency_lut[i_layer][layer_prims.index(chosen_prim)])
    # alternative formulation considered previously:
    # acc + float(self.lamb) / (latency_penalty - self._min_lat + 1.)
    return acc + float(self.lamb) * (1. / latency_penalty - 1. / self._max_lat)
def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return top-1 accuracies for the clean and fault-injected forwards.

    When ``self.calc_latency`` is set, an extra plain forward is run to
    gather a flops statistic and a third element ``1 / max(flops*1e-6 - 180, 20)``
    is appended to the returned tuple.
    """
    outputs_f = cand_net.forward_one_step_callback(inputs, callback=self.inject)
    if hasattr(cand_net, "super_net"):
        cand_net.super_net.reset_flops()
    if self.calc_latency:
        # the forward hooks accumulate flops during this pass
        cand_net.forward(inputs)
        if isinstance(cand_net, nn.DataParallel):
            flops = cand_net.module.total_flops
        else:
            flops = cand_net.super_net.total_flops if hasattr(cand_net, "super_net") \
                else cand_net.total_flops
        if hasattr(cand_net, "super_net"):
            cand_net.super_net._flops_calculated = True
        return (
            float(accuracy(outputs, targets)[0]) / 100,
            float(accuracy(outputs_f, targets)[0]) / 100,
            1 / max(flops * 1e-6 - 180, 20),
        )
    # Fix: the original final return ended with a dangling "\" line
    # continuation (a syntax error as written); return the two
    # accuracies as a proper two-tuple.
    return (
        float(accuracy(outputs, targets)[0]) / 100,
        float(accuracy(outputs_f, targets)[0]) / 100,
    )
def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return (clean top-1 acc, adversarial success rate, scaled distance x2).

    Adversarial examples come from ``self._gen_adv``; each carries its
    ``adversarial_class`` and ``distance.value``.
    """
    adv_examples = self._gen_adv(inputs, outputs, targets, cand_net)
    adv_classes = np.asarray([ex.adversarial_class for ex in adv_examples])
    adv_distance = np.asarray([ex.distance.value for ex in adv_examples])
    clean_acc = float(accuracy(outputs, targets)[0]) / 100
    adv_match_rate = np.sum(adv_classes == targets.cpu().numpy()) / len(inputs)
    # NOTE: if there is any chance the adv example will be forwarded again,
    # should convert the adv examples from [0,1] to the normalized domain by (.-mean)/std
    # NOTE(review): the scaled distance is deliberately reproduced twice,
    # matching the original return tuple — confirm whether the duplicate
    # slot is intended by downstream consumers.
    scaled_distance = 1e10 * adv_distance
    return (clean_acc, adv_match_rate, scaled_distance, scaled_distance)
def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return (top-1 acc, predicted latency, measured elapse in ms).

    One forward is always run first (serves as warmup / may populate
    ``cand_net.elapse``); if the network does not expose ``elapse``, a
    second forward is timed with ``timeit.default_timer``.
    """
    cand_net.forward(inputs)
    if hasattr(cand_net, "elapse"):
        elapse = cand_net.elapse
    else:
        start = timeit.default_timer()
        cand_net.forward(inputs)
        elapse = timeit.default_timer() - start
    top1 = float(accuracy(outputs, targets)[0]) / 100
    return top1, self.latency(cand_net), 1000 * elapse
def get_perfs(self, inputs, outputs, targets, cand_net):
    """Return ``[top-1 acc, total latency]`` for the candidate network.

    Discrete rollouts look each chosen primitive up in the latency LUT;
    otherwise (differentiable rollouts) the LUT is weighted by the
    architecture distribution.
    """
    acc = float(accuracy(outputs, targets)[0]) / 100
    space = self.search_space
    total_latency = 0.
    if cand_net.super_net.rollout_type == "discrete":
        for i_layer, geno in enumerate(cand_net.genotypes):
            chosen_prim = geno[0][0]
            layer_prims = space.cell_shared_primitives[space.cell_layout[i_layer]]
            total_latency += float(
                self.latency_lut[i_layer][layer_prims.index(chosen_prim)])
    else:
        for i_layer, arch in enumerate(cand_net.arch):
            weights = arch[0]
            lut = torch.Tensor(self.latency_lut[i_layer]).to(arch.device)
            latency = (weights * lut).sum().item()
            if weights.ndimension() == 2:
                # average over the extra leading dimension
                latency /= weights.shape[0]
            total_latency += latency
    return [acc, total_latency]
def get_reward(self, inputs, outputs, targets, cand_net):
    """Return top-1 accuracy, optionally combined with a latency reward.

    Only the first half of ``cand_net.genotypes`` encodes per-cell
    primitive choices (the second half is the concat nodes), so the
    latency lookup iterates over that prefix only.
    """
    acc = float(accuracy(outputs, targets)[0]) / 100
    if self.lamb is None:
        return acc
    space = self.search_space
    # first half is the primitive type of each cell, second half is the concat nodes
    num_cells = len(cand_net.genotypes) // 2
    latency_penalty = 0.
    for i_layer, geno in enumerate(cand_net.genotypes[:num_cells]):
        chosen_prim = geno[0][0]
        layer_prims = space.cell_shared_primitives[space.cell_layout[i_layer]]
        latency_penalty += float(
            self.latency_lut[i_layer][layer_prims.index(chosen_prim)])
    # alternative formulation considered previously:
    # acc + float(self.lamb) / (latency_penalty - self._min_lat + 1.)
    return acc + float(self.lamb) * (1. / latency_penalty - 1. / self._max_lat)
def get_acc(self, inputs, outputs, annotations, cand_net):
    """Return (top-1, top-5) accuracy on the positive (non-background) anchors.

    ``batch_transform`` matches annotations to anchors; entries with
    ``conf_t > 0`` are the positives kept for scoring.
    """
    # target: [batch_size, anchor_num, 5], boxes + labels
    conf_t, _, _ = self.batch_transform(inputs, outputs, annotations)
    positives = conf_t > 0
    _, confidences, _ = outputs
    return accuracy(confidences[positives], conf_t[positives], topk=(1, 5))
def get_perfs(self, inputs, outputs, targets, cand_net):
    """
    Get top-1 acc.
    """
    top1 = accuracy(outputs, targets)[0]
    return [float(top1) / 100]