Code example #1
 def tier_level_elm(elm,tier_levels):
   """tier_levels=[bool,bool]"""
   if elm.og_index in break_points: st()
   cd = elm.call_data
   t1,t2,t3 = fmt_syms.get_Tier1() if tier_levels[0] else fmt_syms.get_Tier2()
   tier = 1 if tier_levels[0] else 2
   if isinstance(cd,SimpleFunkWithArgs):
     return (
       simp_funk_w_args(elm,sym1=t1,sym2=t2,c=True),
       simp_funk_w_args(elm,sym1=t1,sym2=t2,c=False)
     )
   elif isinstance(cd,LineEvent):
     return (
       prsd_line(elm,le2=t2,c=True),
       prsd_line(elm,le2=t2,c=False)
     )
   elif isinstance(cd,ParsedHTML):
     t1,t2,t3 = fmt_syms.get_Tier(tier)
     return (
       prsd_html(elm,sym1=t1,sym2=t2,c=True),
       prsd_html(elm,sym1=t1,sym2=t2,c=False)
     )
   elif isinstance(cd,ParsedJSON):
     t1,t2,t3 = fmt_syms.get_Tier(tier)
     return (prsd_json(elm,sym1=t1,sym2=t2,c=True),
       prsd_json(elm,sym1=t1,sym2=t2,c=False)
     )
   elif isinstance(cd,VerboseList):
     return prsd_lst(elm,c=True,tier=tier),prsd_lst(elm,c=False,tier=tier)
   elif isinstance(cd[0],ParsedTuple):
     return prsd_tpl(elm,c=True,tier=tier),prsd_tpl(elm,c=False,tier=tier)
   else:
    raise TypeError(f"unhandled call_data type: {type(cd)}")
Code example #2
File: plottify.py  Project: DmitryUlyanov/plottify
def plot_polygons(img,
                  poly_dict,
                  alpha=0.5,
                  ax=None,
                  figsize=None,
                  show_axs=False):
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    show_img(img, ax, figsize, show_axs)
    patches = []
    colors = []

    labels = list(poly_dict.keys())
    st()
    for l in labels:
        color = 100 * np.random.rand()
        for polygons in poly_dict[l]:
            for poly in polygons:
                if poly:
                    # st()
                    p = np.array(convert_poly(poly, img.shape))
                    patches.append(Polygon(p, closed=True))
            colors.append(color)
    patch_collection = PatchCollection(patches, alpha=alpha)
    patch_collection.set_array(np.array(colors))
    ax.add_collection(patch_collection)
Code example #3
 def entry():
     query, actions, outputs, filenames, write_func, pkldf = qcfg = QueryConfig(
     ).fullcall()
     dfpath = basepath.joinpath('src/youtube-dl/bin/agg.fullcall/fcdf.pkl')
     dfs = {}
     if not dfpath.exists():
         fcdf = fur.get_df_from_tracefile(filenames[0])
         original_index = list(fcdf.index)
         fcdf['og_index'] = original_index
         fcdf.to_pickle(dfpath)
     else:
         fcdf = pd.read_pickle(dfpath)
     st()
     fcdf = fcdf[:30]  #!! RIGHT HERE
     # if TRUNC: fcdf = fcdf[trunc_start:trunc_end]
     columns = [
         "filepath", "line_number", "event_kind", "call_data", "og_index"
     ]
     fcdf_agg = fud.aggregate_aggdfs([fcdf], columns=columns)
     cds = call_data_series = fcdf_agg.apply(process_row, axis=1)
     cdl = call_data_lst = [
         SimpleFunkWithArgs(cd) if
         not (isinstance(cd, LineEvent) or isinstance(cd, VerboseList)
              or isinstance(cd, ParsedHTML) or isinstance(cd, ParsedJSON)
              or isinstance(cd[0], ParsedTuple)) else cd
         for cd in call_data_series
     ]
     fcdf_agg.call_data = call_data_lst
     lwr = lst_w_regions = add_regions(fcdf_agg)
     lwr_df = pd.DataFrame(lwr)
     wpath = util.homepath.joinpath('bin/lwr')
     lwr2 = write_file(df=fcdf_agg, lst_w_regions=lwr, writepath=wpath)
     return lwr2
Code example #4
 def get_syn(sentence, word):
     # return WordNet.get_syn(word, types=[SynonymSubstituition.get_type(sentence, word)])[0]
     # st()
     word = word.split(' ')[0]
     syn = WordNet.get_syn(
         word, types=[SynonymSubstituition.get_type(sentence, word)])
     if len(syn) == 0:
         syn = WordNet.get_syn(
             word
         )  # get all syns, ranked by general commonality (not wd specific)
     if len(syn) > 0:
         syn = pd.Series(syn)
         try:
             syn = syn[syn != word].unique()
         except:
             st()
         syn2 = []
         for i in range(len(syn)):
             if len(syn[i].split(', ')) > 1:  # comma-separated list of synonyms
                 syn[i] = syn[i].split(', ')
                 key = lambda x: CheckRareWord(
                     SynonymSubstituition.clean(x).split(' ')[0])
                 for wd in sorted(syn[i], key=key):
                     syn2.append(wd.strip())
             else:
                 syn2.append(syn[i].strip())
     return ', '.join(syn2[0:SynonymSubstituition.num_syns_display]) if len(syn) > 0  \
            else '?'
Code example #5
    def get_list_of_scenarios(config_file_name):
        """
        Parse *all* config files and return a list of all scenarios.
        """

        list_of_config_files = glob.glob("{}/srunner/examples/*.xml".format(
            os.getenv('ROOT_SCENARIO_RUNNER', "./")))
        list_of_config_files += glob.glob("{}/srunner/examples/*.xosc".format(
            os.getenv('ROOT_SCENARIO_RUNNER', "./")))

        from pdb import set_trace as st
        st()
        if config_file_name != '':
            list_of_config_files.append(config_file_name)

        scenarios = []
        for file_name in list_of_config_files:
            if ".xosc" in file_name:
                tree = ET.parse(file_name)
                scenarios.append("{} (OpenSCENARIO)".format(
                    tree.find("FileHeader").attrib.get('description', None)))
            else:
                tree = ET.parse(file_name)
                for scenario in tree.iter("scenario"):
                    scenarios.append(scenario.attrib.get('name', None))

        return scenarios
Code example #6
def train_cifar(args, nb_epochs, trainloader,testloader, net,optimizer,criterion,logging_freq=2000):
    # TODO: test loss
    for epoch in range(nb_epochs):  # loop over the dataset multiple times
        running_train_loss = 0.0
        #running_test_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            start_time = time.time()
            inputs, labels = data
            if args.enable_cuda:
                inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
            else:
                inputs, labels = Variable(inputs), Variable(labels)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            running_train_loss += loss.data[0]
            seconds,minutes,hours = utils.report_times(start_time)
            st()
            if i % logging_freq == logging_freq-1:    # print every logging_freq mini-batches
                # divide by logging_freq because the loss was summed over logging_freq mini-batches, so this gives the average
                print(f'monitoring during training: epoch={epoch+1}, batch_index={i+1}, loss={running_train_loss/logging_freq}')
                running_train_loss = 0.0
Code example #7
 def get_weight_stat(self, batch, W_type):
     ## get lengths of sequences for each sample in the batch
     weight_lengths = self.Tensor([len(sample[W_type])
                                   for sample in batch]).long()
     ## pad
     #st()
     new_batch = []
     for i, sample in enumerate(batch):
         try:
             print(f'i = {i}')
             print(f'sample = {sample}')
             tensor_sample = self.Tensor(sample[W_type])
             print(f'tensor_sample = {tensor_sample}')
             new_batch.append(tensor_sample)
         except:
             print(f'\n ---- ERROR: i = {i}')
             print(f'sample = {sample}')
             st()
     ## pad batch sequences
     batch_weight_rep = torch.nn.utils.rnn.pad_sequence(
         new_batch,
         batch_first=self.batch_first,
         padding_value=self.padding_value)
     ## compute mask
     weight_mask = (batch_weight_rep != self.padding_value)
     ##
     return batch_weight_rep.to(self.device), weight_lengths.to(
         self.device), weight_mask.to(self.device)
Code example #8
    def fix_init(self, elm):
        new_filepath = Path(elm.filepath).stem
        assert new_filepath is not None, elm
        if "__init__" in new_filepath:
            lno = int(elm.line_number)

            def trunc(f):
                return f[lno - 3:lno + 3]

            def func(e):
                return e.filepath

            e, d, p, y = trnctd_fls = [trunc(f) for f in self.initfiles]
            try:
                if lno > 61:
                    new_filepath = "__init__"
                elif e and any([func(elm) in itm for itm in e]):
                    new_filepath = "e.__init__"
                elif d and any([func(elm) in itm for itm in d]):
                    new_filepath = "d.__init__"
                elif p and any([func(elm) in itm for itm in p]):
                    new_filepath = "p.__init__"
                elif y and any([func(elm) in itm for itm in y]):
                    new_filepath = "__init__"
            except Exception as exc:
                tb = stackprinter.format(exc)
                print('The front fell off.' + tb)
                st()
        if new_filepath is None: st()
        return new_filepath
Code example #9
 def prsd_lst(elm, c=False, tier=0):
     """VerboseList"""
     if elm.og_index == 39: st()
     cd = elm.call_data
     m = re.match(r"\s*", cd.get_funcname())
     idt = m.group(0)
     if c:
         sym_func = f"{cd.symbol} {cd.funk}"
         filename_fileno = (f"{util.fmt_filename(elm.filepath)}"
                            f":{util.fmt_lineno(elm.line_number)}\n")
         filename_fileno = util.filename_fileno2(elm.filepath,
                                                 elm.line_number) + "\n"
         argname_argvals = (f"{cd.get_args(c=True)}")
     else:
         sym_func = f"{cd.symbol} {cd.funk}"
         filename_fileno = (
             f"{util.remove_color(util.fmt_filename(elm.filepath))}"
             f":{util.remove_color(util.fmt_lineno(elm.line_number))}\n")
         argname_argvals = (f"{cd.get_args(c=False)}")
     split_args = argname_argvals.split("\n")
     num_args = len(split_args)
     vl1, vl2 = fmt_syms.get_VL(nargs=num_args)
     argname_argvals = "\n".join([
         f"{next(vl2,None)}{idt}{line}"
         for line in argname_argvals.split("\n")
     ])
     fillchars = util.get_fillchars(sym_func)
     elm_as_str = f'{vl1}' + sym_func + fillchars + filename_fileno + argname_argvals
     return elm_as_str
Code example #10
File: nninst_graph.py  Project: lsabc/fse20
 def iterate_graph(node_id: int):
     node_name = self.node(node_id).name
     
     if node_name not in layer_list:
         node = self.node(node_id)
         if isinstance(self.node(node_id), Operation):
             print(type(self.node(node_id)))
             nonlocal weights
             if isinstance(node, DenseOp):
                 input_shape = self.node(node._inputs[0]).shape
                 output_shape = self.node(node._outputs[0]).shape
                 syn = input_shape[1] * output_shape[1]
                 
             elif (isinstance(node, AvgPoolOp) or 
                 isinstance(node, ReluOp) or
                 isinstance(node, BatchNormOp)
                 ):
                 input_shape = self.node(node._inputs[0]).shape
                 output_shape = self.node(node._outputs[0]).shape
                 syn = np.prod(input_shape)
             elif (isinstance(node, ReshapeOp) or 
                 isinstance(node, PadOp) or
                 isinstance(node, TransposeOp)
                 ):
                 input_shape, output_shape, syn = 0, 0, 0
             elif isinstance(node, AddOp):
                 input_shape = node._inputs
                 output_shape = self.node(node._outputs[0]).shape
                 syn = np.prod(output_shape) * len(node._inputs)
             elif isinstance(node, Conv2dOp):
                 input_shape = self.node(node._inputs[0]).shape
                 output_shape = self.node(node._outputs[0]).shape
                 # print(input_shape[1], output_shape, 3, 3)
                 syn = input_shape[1] * output_shape[1] *3*3
             else:
                 print(node)
                 st()
             # print(node, input_shape, output_shape, syn)
             weights += syn
             
         layer_list.append(node_name)
         if node_id not in self.inputs:
             node_inputs = self.node(node_id).inputs
             if len(node_inputs) == 1:
                 iterate_graph(node_inputs[0])
             else:
                 node_inputs = list(
                     filter(
                         lambda input_id: not (
                             self.contains_tensor(input_id)
                             and self.tensor(input_id).name.startswith("add")
                         ),
                         node_inputs,
                     )
                 )
                 # if len(node_inputs) != 1:
                 #     print(f"guess: choose first input in {node_name}")
                 iterate_graph(node_inputs[0])
     else:
         print(f"node {node_name} is already in list")
Code example #11
    def __init__(self, hyperparams):
        # Get configs, hyperparameters & initialize agent
        self.ptconf, self.dconf, self.camconf = hyperparams[
            'ptconf'], hyperparams['dconf'], hyperparams['camconf']
        self.env_name = hyperparams['env']
        self.gui_on = hyperparams['bullet_gui_on']
        self.hyperparams = hyperparams
        config = copy(AGENT_BULLET)
        config.update(hyperparams)
        Agent.__init__(self, config)

        # Setup bullet environment
        self._setup_conditions()
        self._setup_world(hyperparams['filename'])
        self.setup_bullet()
        self.setup_inference_camera()
        # Get demo data
        self.demo_vid = imageio.get_reader(
            join(self.dconf.DEMO_DIR, self.dconf.DEMO_NAME,
                 'rgb/{}.mp4'.format(self.dconf.SEQNAME)))
        self.demo_frames = []
        self.reset_condition = hyperparams['reset_condition']
        for im in self.demo_vid:
            self.demo_frames.append(im)
        # self.cost_tgt_mean = hyperparams['cost_tgt_mean']
        # self.cost_tgt_std = hyperparams['cost_tgt_std']
        self.objects_centroids_mrcnn = hyperparams['objects_centroids_mrcnn']

        # Setup feature embedding network if enabled
        self.tcn = None

        # Setup Mask RCNN
        inference_config = InferenceConfig()
        with tf.device('/device:GPU:1'):
            self.mrcnn = modellib.MaskRCNN(
                mode='inference',
                model_dir=join(self.ptconf.EXP_DIR, self.ptconf.EXP_NAME,
                               'mrcnn_logs'),
                config=inference_config)
            self.mrcnn.load_weights(self.ptconf.WEIGHTS_FILE_PATH_MRCNN,
                                    by_name=True)
        self.class_names = gconf.CLASS_NAMES_W_BG
        # self.target_ids = gconf.CLASS_IDS
        self.target_ids = [1, 2]
        self.colors = visualize.random_colors(7)
        self.plotting_on = hyperparams['plotting_on']
        if self.plotting_on:
            self.fig, self.ax = visualize.get_ax()
        self.mrcnn_centroids_last_known = {
            key: None
            for key in self.target_ids
        }
        st()

        # Run MRCNN on one image because first detection always takes long to initialize
        self.vid_seqname = 0
        rgb_crop, depth_crop = self.get_images(0)
        results = self.mrcnn.detect([rgb_crop], verbose=0)
        self.blob_detector = BlobDetector()
Code example #12
 def get_results_from_normalized_net(self, epoch, seed_id,
                                     path_to_folder_expts):
     ''' '''
     ''' get net '''
     nets_folders = [
         filename for filename in os.listdir(path_to_folder_expts)
         if 'nets_folder' in filename
     ]
     net_folder = [
         filename for filename in nets_folders
         if f'seed_{seed_id}' in filename
     ][0]  # note: seeds are unique for every h.p. setting
     net_path = os.path.join(path_to_folder_expts, net_folder)
     if len([
             net_name for net_name in os.listdir(net_path)
             if f'epoch_{epoch}' in net_name
     ]) == 0:
         st()
     net_name = [
         net_name for net_name in os.listdir(net_path)
         if f'epoch_{epoch}' in net_name
     ][0]
     net_path = os.path.join(net_path, net_name)
     net = torch.load(net_path)
     ''' get unormalized test error '''
     train_loss_un, train_error_un = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.trainloader, self.device)
     test_loss_un, test_error_un = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.testloader, self.device)
     ## random labels
     train_loss_un_rand, train_error_un_rand = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.trainloader_rand, self.device)
     test_loss_un_rand, test_error_un_rand = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.testloader_rand, self.device)
     ''' normalize net '''
     net = self.normalize(net)
     ''' get normalized train errors '''
     ## natural labels
     train_loss_norm, train_error_norm = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.trainloader, self.device)
     test_loss_norm, test_error_norm = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.testloader, self.device)
     ## random labels
     train_loss_norm_rand, train_error_norm_rand = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.trainloader_rand, self.device)
     test_loss_norm_rand, test_error_norm_rand = evalaute_mdl_on_full_data_set(
         self.loss, self.error, net, self.testloader_rand, self.device)
     ''' pack results '''
     normalized_results = (train_loss_norm, train_error_norm,
                           test_loss_norm, test_error_norm)
     unnormalized_results = (train_loss_un, train_error_un, test_loss_un,
                             test_error_un)
     ##
     normalized_results_rand = (train_loss_norm_rand, train_error_norm_rand,
                                test_loss_norm_rand, test_error_norm_rand)
     unnormalized_results_rand = (train_loss_un_rand, train_error_un_rand,
                                  test_loss_un_rand, test_error_un_rand)
     ''' return '''
     return normalized_results, unnormalized_results, normalized_results_rand, unnormalized_results_rand
Code example #13
 def fix_init(self,elm):
   new_filepath = Path(elm.filename).stem
   assert new_filepath is not None, elm
   if "__init__" in new_filepath:
     parent_initial = Path(elm.filename).parent.stem[0]
     new_filepath = f"{parent_initial}{new_filepath}"
   if new_filepath is None: st()
   return new_filepath
Code example #14
def check_order_data(trainloader):
    for i,data_train in enumerate(trainloader):
        if i==0:
            print(i)
            st()
            #print(data_train)
    for i,data_train in enumerate(trainloader):
        if i==3:
            print(i)
Code example #15
def test_mutate(model, dataset, args):
    model.eval()

    for idx in range(args.test_num):
        raw_img, label = dataset[idx]
        image = np.array(raw_img)
        imgs = [image]

        # these parameters need to be carefully considered in the experiment
        # to account for the feedback
        params = []
        # params.append(list(range(-10, 10)))  # image_translation
        # params.append(list(map(lambda x: x * 0.1, list(range(9, 11)))))  # image_scale
        # params.append(list(map(lambda x: x * 0.1, list(range(-1, 1)))))  # image_shear
        # params.append(list(range(-10, 10)))  # image_rotation
        # params.append(list(map(lambda x: x * 0.1, list(range(7, 13)))))  # image_contrast
        # params.append(list(range(-15, 15)))  # image_brightness
        # params.append(list(range(1, 10)))  # image_blur
        # params.append(list(range(1, 10)))  # image_pixel_change
        # params.append(list(range(1, 3)))  # image_noise [1,2]

        params.append([-10, 10])  # image_translation
        params.append(list(map(lambda x: x * 0.1, [9, 11])))  # image_scale
        params.append(list(map(lambda x: x * 0.1, [-1, 1])))  # image_shear
        params.append([-10, 10])  # image_rotation
        params.append(list(map(lambda x: x * 0.1, [7, 12])))  # image_contrast
        params.append([-15, 15])  # image_brightness
        params.append([9])  # image_blur
        params.append([9])  # image_pixel_change
        params.append([2])  # image_noise

        classA = [7, 8]  # pixel value transformation
        classB = [0, 1, 2, 3, 4, 5, 6]  # Affine transformation

        x, y, z = image.shape
        # random.seed(time.time())

        for tid in range(9):
            # print(tid)
            # tid = random.sample(classA + classB, 1)[0]

            # tid = 7
            # Randomly select one transformation   Line-7 in Algorithm2
            transformation = transformations[tid]
            param_range = params[tid]
            for param_id, param in enumerate(param_range):

                img_new = transformation(copy.deepcopy(image), param)
                imgs.append(img_new)

        row_img = np.zeros((x, y * len(imgs), 3), dtype=np.uint8)
        for i in range(len(imgs)):
            row_img[:, i * y:(i + 1) * y, :] = imgs[i]
        path = osp.join("results/test", f"{idx}.png")
        Image.fromarray(row_img).save(path)

        st()
Code example #16
    def load_nc_info(self):
        path = osp.join(self.args.nc_info_dir, "accumulate_coverage.npy")
        with open(path, "rb") as f:
            accumulate_coverage = np.load(f, allow_pickle=True)

        path = osp.join(self.args.nc_info_dir, "log_module_names.npy")
        with open(path, "rb") as f:
            log_names = np.load(f, allow_pickle=True)
        return accumulate_coverage, log_names
        st()
Code example #17
 def merge_trace(acc_trace, trace2):
     node_names = acc_trace.ops.keys()
     for node_name in node_names:
         if TraceKey.IO_AVG in acc_trace.ops[node_name]:
             acc_point_logger = acc_trace.ops[node_name][TraceKey.IO_AVG]
             if TraceKey.IO_AVG not in trace2.ops[node_name]:
                 st()
             new_point_logger = trace2.ops[node_name][TraceKey.IO_AVG]
             acc_point_logger += new_point_logger
             acc_trace.ops[node_name][TraceKey.IO_AVG] = acc_point_logger
Code example #18
File: utils.py  Project: alxmamaev/kekas
def children_and_parameters(m: nn.Module):
    """Return the children of `m` and its direct parameters not registered in modules."""
    children = list(m.children())
    children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],
                     [])
    for p in m.parameters():
        if id(p) not in children_p:
            st()
            children.append(ParameterModule(p))
    return children
Code example #19
def compute_features(trip, d_id=None, t_id=None):
    # print d_id,t_id
    trip_duration = float(len(trip))
    speed, acceleration = speeds_and_accelerations(trip)
    speed = abs(speed)
    trip_length = float(np.sum(speed))
    speedup = acceleration[acceleration > 0]
    slowdown = -acceleration[acceleration < 0]
    try:
        curvature = get_curvature(trip)
    except:
        logging.exception('Cannot compute curvature for trip %d of driver %d' % (t_id, d_id))
        curvature = np.array([])

    curvature_percentiles = [percentile(curvature, pc) for pc in [90, 75, 50, 25, 10]]
    curvature_maxima_x = sp.signal.argrelmax(curvature, order=2)[0]
    curvature_maxima_y = curvature[curvature_maxima_x]
    curvature_maxima_speed = speed[curvature_maxima_x]
    curvature_maxima_percentiles = [percentile(curvature_maxima_y, pc) for pc in [75, 50, 25]]
    centrifugal_acceleration = np.power(curvature_maxima_speed, 2)*curvature_maxima_y
    # curvature_inter_maxima = [np.diff(maxima) for maxima in curvature_maxima]
    # median_curvature_inter_maxima = [np.percentile(cim, 50) if cim.size else trip_length for cim in curvature_inter_maxima]

    mean_slowdown, mean_speedup = speeds_near_stops(speed)

    features = [
        trip_length,
        trip_duration,
        np.percentile(speed, 75),
        np.percentile(speed, 50),
        np.percentile(speed, 25),
        np.percentile(speedup, 75),
        np.percentile(speedup, 50),
        np.percentile(speedup, 25),
        np.percentile(slowdown, 75),
        np.percentile(slowdown, 50),
        np.percentile(slowdown, 25),
        curvature_percentiles[0],
        curvature_percentiles[1],
        curvature_percentiles[2],
        curvature_percentiles[3],
        curvature_percentiles[4],
        len(curvature_maxima_x)/trip_length,
        curvature_maxima_percentiles[0],
        curvature_maxima_percentiles[1],
        curvature_maxima_percentiles[2],
        mean(centrifugal_acceleration),
        mean(curvature_maxima_speed),
        gps_signal(speed),
        mean_slowdown,
        mean_speedup,
    ]
    if not np.all(np.isfinite(features)):
        st()
    return features
Code example #20
        def eval(v):
            st()
            print(vec.shape)
            regu_coef = self.FIM_invert_args['damping'] if regu_coef is None else regu_coef
            self.npg_grad = self.cg_solve(self.hvp, self.vpg_grad, x_0=self.vpg_grad.copy(),
                                cg_iters=self.FIM_invert_args['iters'])
            Hvp, npg_grad = self.sess.run([self.hvp, self.npg_grad], {self.obs_ph: observations,
                                             self.action_ph: actions,
                                             self.vec: vec,
                                             self.regu_coef: regu_coef})

            return Hvp, npg_grad
Code example #21
    def forward(self, x):
        # Input is of shape [Batch, 3, channel, height, width]
        # Cylindrical representation of data: [log(|z|), x/|z|, y/|z|]

        wFMs = self.wFM(x)

        # Separate each component and do convolution along each component
        # The input of each component is [Batch, in_channel, height, width]
        # The output of each component is [Batch, in_channel, height, width]
        # The final output of this layer would be [Batch, in_channel, height, width]
        mag_output = wFMs[:, 0, ...]
        cos_output = wFMs[:, 1, ...]
        sin_output = wFMs[:, 2, ...]
        cos_sin_output = wFMs[:, 1:, ...]

        # For center-cropping original input
        output_xdim = cos_output.shape[2]
        output_ydim = cos_output.shape[3]
        input_xdim = x.shape[3]
        input_ydim = x.shape[4]

        start_x = int((input_xdim - output_xdim) / 2)
        start_y = int((input_ydim - output_ydim) / 2)

        m = nn.Upsample(size=(input_xdim, input_ydim), mode='bilinear')

        cropped_input = x
        log_output = m(mag_output)  # upsample the log-magnitude channel to the input resolution
        cos_sin_output = torch.cat((m(cos_sin_output[:, 0, ...]).unsqueeze(1),
                                    m(cos_sin_output[:, 1, ...]).unsqueeze(1)),
                                   dim=1)
        # Compute distance according to sqrt(log^2[(|z2|+b)/(|z1|+b)] + acos^2(x^T * y))
        # Need to add noise or else normalization may have zero entries which cause NaN
        direction_noise = 1e-5
        directional_difference = cropped_input[:, 1:, ...] - cos_sin_output
        directional_difference = (
            directional_difference + direction_noise) / torch.sqrt(
                torch.sum(directional_difference**2 + direction_noise,
                          dim=1,
                          keepdim=True))

        if m(directional_difference):
            st()
        magnitude_difference = cropped_input[:, 0, ...] - log_output

        return torch.cat(
            (magnitude_difference.unsqueeze(1), directional_difference), dim=1)
Code example #22
def main(unused_argv):
    print('Saving results to %s' % FLAGS.output_directory)

    if not exists(FLAGS.output_directory):
        makedirs(FLAGS.output_directory)

    from pdb import set_trace as st
    st()
    process_mpii(anno,
                 img_dir,
                 FLAGS.output_directory,
                 FLAGS.train_shards,
                 is_train=True)
Code example #23
def find_path(graphd, src, dest, path=[]):
    path = path + [src]
    st()
    if src == dest: 
        return path
    if src not in graphd:
        return None
    
    for node in graphd[src]:
        if node not in path:
            new_path = find_path(graphd, node, dest, path)
            if new_path : return new_path
    return None
Code example #24
 def restore(self, load_path):
     sum_tf, sumsq_tf, count_tf, mean, std = pickle.load(
         open(load_path, 'rb'))
     restored_value = tf.constant(sum_tf), tf.constant(
         sumsq_tf), tf.constant(count_tf), tf.constant(mean), tf.constant(
             std)
     var_list = [
         self.sum_tf, self.sumsq_tf, self.count_tf, self.mean, self.std
     ]
     st()
     self.sess.run([
         tf.assign(var, restored)
         for (var, restored) in zip(var_list, restored_value)
     ])
Code example #25
    def __init__(self,
                 img_name_list_path,
                 voc12_root,
                 resize_long=None,
                 rescale=None,
                 img_normal=TorchvisionNormalize(),
                 hor_flip=False,
                 crop_size=None,
                 crop_method=None):
        super().__init__(img_name_list_path, voc12_root, resize_long, rescale,
                         img_normal, hor_flip, crop_size, crop_method)
        self.label_list = load_image_label_list_from_npy(self.img_name_list)

        st()
Code example #26
 def get_random_faltness_radius_repeated_doubling(self, precision=0.0001):
     '''
         The goal of this is to ease the computation of the epsilon radius of
         a network, which is defined as follows:
             r(dx,eps,W) = sup{r \in R : |I(W) - I(W+r*dx)|<=eps}
         W_all = r*dx
         dx = isotropic unit vector from the net
     '''
     ''' record reference errors/losses '''
     #stats_collector.record_errors_loss_reference_net(criterion,error_criterion,net,trainloader,testloader,device)
     ''' get isotropic direction '''
     nb_params = nn_mdls.count_nb_params(self.net)
     v = torch.normal(torch.zeros(nb_params),
                      torch.ones(nb_params)).to(self.device)
     dx = v / v.norm(2)
     ''' fill up I list '''
     r = self.r_initial
     Loss_minima, Error_minima = evalaute_running_mdl_data_set(
         self.criterion, self.error_criterion, self.net, self.trainloader,
         self.device, self.iterations)
     while True:
         net_rdx = produce_new_translated_net(self.net, r, dx)
         Loss_rdx, Error_rdx = evalaute_running_mdl_data_set(
             self.criterion, self.error_criterion, net_rdx,
             self.trainloader, self.device, self.iterations)
         diff = Error_rdx - Error_minima
         print(f'\n--\nr = {r}')
         print(f'Error_minima={Error_minima}')
         print(f'Error_rdx={Error_rdx}')
         print(f'diff = {diff}')
         print(f'epsilon={self.epsilon}')
         print(f'abs(abs(diff)-eps)={abs(abs(diff)-self.epsilon)}')
         print(f'precision={precision}')
         print(
             f'abs(abs(diff)-eps) < precision={abs(abs(diff)-self.epsilon) < precision}'
         )
         print(
             f'approx_equals(diff,self.epsilon,precision=precision)={approx_equals(diff,self.epsilon,precision=precision)}'
         )
         ''' check if we reached epsilon jump '''
         if approx_equals(diff, self.epsilon, precision=precision
                          ):  ## 10^-4.5 for half machine precision
             ''' compute I(W+r*dx) = I(W+W_all)'''
             st()
             return r
         elif diff > self.epsilon:  # I(w+rdx) - I(W) > eps, r is too large
             r /= 2
         else:  # I(w+rdx) - I(W) < eps, r is too small
             r *= 1.5
Code example #27
    def forward(self, x):
        y = list()

        if self.arch == 'C':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
        elif self.arch == 'D':
            x = self.layer0(x)

        x = self.layer1(x)
        y.append(x)
        x = self.layer2(x)
        y.append(x)

        x = self.layer3(x)
        y.append(x)

        x = self.layer4(x)
        y.append(x)

        x = self.layer5(x)
        y.append(x)

        if self.layer6 is not None:
            x = self.layer6(x)
            y.append(x)

        if self.layer7 is not None:
            x = self.layer7(x)
            y.append(x)

        if self.layer8 is not None:
            x = self.layer8(x)
            y.append(x)

        if self.out_map:
            x = self.fc(x)
        else:
            x = self.avgpool(x)
            x = self.fc(x)
            x = x.view(x.size(0), -1)

        st()

        if self.out_middle:
            return x, y
        else:
            return x
Code example #28
def read_frame(vid_name, frame_num, box, x):
    img_name = os.path.join(vid_name, str(frame_num + 1) + '.jpg')
    if not os.path.isfile(img_name):
        img_name = os.path.join(vid_name, str(frame_num + 1) + '.png')

    img = cv2.imread(img_name)
    joints = x[:, :, frame_num] - 1.0
    box_frame = box[frame_num, :]
    st()
    scale = get_person_scale(joints)
    pos = np.zeros(2)
    pos[0] = (box_frame[0] + box_frame[2] / 2.0)
    pos[1] = (box_frame[1] + box_frame[3] / 2.0)

    return img, joints, scale, pos
Code example #29
 def tf_get_actions(self, observations, update_filter=True, params=None):
     assert len(observations.shape) == 2
     st()
     obs = self.tf_obs_filters[0](observations, update=update_filter)
     if params is None:
         actions = tf.transpose(
             tf.matmul(self.policy_params["W"],
                       tf.transpose(obs))) + self.policy_params["b"]
     else:
         actions = tf.transpose(
             tf.matmul(tf.cast(params["W"], obs.dtype),
                       tf.transpose(obs))) + tf.cast(
                           params["b"], obs.dtype)
     actions = tf.clip_by_value(actions, LOG_SIG_MIN, LOG_SIG_MAX)
     return actions, {}
Code example #30
def reinit_model(
    begin_layer,
    model,
    num_classes,
    train_all,
    prune_ratio,
    lr,
    random_prune=False,
):

    if isinstance(model, torchvision.models.resnet.ResNet):
        if len(model.layer1) == 2:
            model, model_params, fix_bn_dict = reinit_in_resnet.init_resnet18(
                model,
                num_classes,
                begin_layer,
                lr,
                train_all,
                random_prune=random_prune,
            )
            model = weight_prune.weight_prune(
                model,
                begin_layer,
                prune_ratio,
                reinit_in_resnet.resnet18_layer_depth,
            )
            return model, model_params, fix_bn_dict
        elif len(model.layer1) == 3:
            raise RuntimeError("lr")
            return reinit_in_resnet.init_resnet50(
                model,
                num_classes,
                begin_layer,
                train_all,
            )
        else:
            raise NotImplementedError
    elif isinstance(model, torchvision.models.vgg.VGG):
        raise RuntimeError("lr")
        return reinit_in_vgg.init_vgg16(
            model,
            num_classes,
            begin_layer,
            train_all,
        )
    else:
        st()
        raise NotImplementedError
Code example #31
File: create_globals.py  Project: suztomo/hp
def run_machine(machine, env):
    daemons = machine.get('daemons', [])
    daemons = expand_commands(daemons, env)
    hp_node = int(machine.get('hp_node', -1))
    if verbose:
        print("  hp_node %d" % hp_node)
    if hp_node == -1:
        print("machine has invalid hp_node")
        st()
        sys.exit(1)
    for d in daemons:
        if 'port' not in d or 'command' not in d:
            print("machine does not have port or command")
            st()
            sys.exit(1)
        port = int(d['port'])
        notify_port_map(hp_node, port)
    run_daemons(hp_node, daemons)
Code example #32
File: views.py  Project: ransomw/job-appappapp
def company_create():
    NAME_KEY = 'name'
    if request.method == 'POST':
        try:
            db.create_company(ImmutableDict(
                    name = request.form[NAME_KEY]))
            flash("company created")
        except DbException as e:
            flash("Error creating company: '" + e.msg + "'")
        except Exception as e:
            print "*************************************"
            print "unexpected error creating new company"
            print e
            print repr(e)
            st()
            print "*************************************"
            flash("unexpected error creating new company")
    return render_template('new_company.html', url_self=_url_company_create)
Code example #33
def main(model='mlp', num_epochs=500):
    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

    # make effective data set smaller
    ind1 = np.arange(y_train.size); ind2 = np.arange(y_test.size)
    np.random.shuffle(ind1); np.random.shuffle(ind2)
    X_train, y_train = X_train[ind1[:2000]], y_train[ind1[:2000]]
    X_test, y_test = X_test[ind2[:1000]], y_test[ind2[:1000]]

    from pdb import set_trace as st

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    if model == 'mlp':
        network = build_mlp(input_var)
    elif model.startswith('custom_mlp:'):
        depth, width, drop_in, drop_hid = model.split(':', 1)[1].split(',')
        network = build_custom_mlp(input_var, int(depth), int(width),
                                   float(drop_in), float(drop_hid))
    elif model == 'cnn':
        network = build_cnn(input_var)
    else:
        print("Unrecognized model type %r." % model)
        return

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=0.01, momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Finally, launch the training loop.
    print("Starting training...")
    acc_list = np.zeros((num_epochs,))
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 200, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))

        acc_list[epoch] = val_acc / val_batches

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 200, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1

    st()

    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
Code example #34
File: gui2.py  Project: chipmunk360/hello_py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 26 17:27:11 2016

@author: pi
"""
import pdb as db
from pdb import set_trace as st
import easygui
easygui.msgbox("Hello there!")
st()
user_response = easygui.msgbox("hello there!")  # a plain msgbox dialog is assumed here; the easygui module itself is not callable

easygui.msgbox("Leo is haunting you for life!")