import pickle

import imageio
from munch import Munch

import model_nets.mujoco_offline  # project-local module
import saverloader                # project-local module


def run_viewpred():  # wrapper added so the indented snippet parses; the name is illustrative
    config_file = "/Users/sfish0101/Documents/2020/Spring/quantized_policies/trained_models/checkpoints/MUJOCO_OFFLINE/train_viewpred_occ/config.pkl"
    model_file = "/Users/sfish0101/Documents/2020/Spring/quantized_policies/trained_models/checkpoints/MUJOCO_OFFLINE/train_viewpred_occ/model-40000.pth"
    with open(config_file, "rb") as f:
        config = pickle.load(f)
    config = Munch(config)

    with open("trained_models/feed.pkl", 'rb') as f:
        feed = pickle.load(f)

    model = model_nets.mujoco_offline.MujocoOffline(config)

    saveload_config = dict()
    saveload_config["total_init"] = True
    saveload_config["reset_iter"] = False
    saveload_config["loadname"] = dict()
    saveload_config["loadname"]["model"] = model_file
    saveload_config = Munch(saveload_config)

    # use a distinct name so the saverloader module is not shadowed
    loader = saverloader.SaverLoader(saveload_config,
                                     model,
                                     load_only=True)
    start_iter = loader.load_weights(optimizer=None)

    result = model.predict_forward(feed)

    imageio.imwrite("tmp/rgb_e.png", result['rgb_e'][0])
    imageio.imwrite("tmp/rgb_camXs.png",
                    feed['rgb_camXs'][0, 0].permute(1, 2, 0).detach().numpy())
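
The example above leans on Munch to turn a plain unpickled config dict into an object with attribute access. A minimal, self-contained sketch of that pattern (the keys here are made up for illustration):

from munch import Munch

# any mapping can be wrapped; keys become attributes
config = Munch({"lr": 1e-4, "batch_size": 8})
assert config.lr == config["lr"]  # attribute and item access are interchangeable
config.max_iters = 40000          # new keys can be assigned as attributes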
Code example #2
    def go(self):
        self.start_time = time.time()
        self.initialize_model()
        print("------ Done creating models ------")

        if hyp.lr > 0:
            params_to_optimize = self.model.parameters()
            self.optimizer = torch.optim.Adam(params_to_optimize, lr=hyp.lr)
        else:
            self.optimizer = None

        self.start_iter = saverloader.load_weights(self.model, self.optimizer)
        print("------ Done loading weights ------")
        # self.start_iter = 0

        set_nums = []
        set_names = []
        set_seqlens = []
        set_batch_sizes = []
        set_inputs = []
        set_writers = []
        set_log_freqs = []
        set_do_backprops = []
        set_dicts = []
        set_loaders = []

        for set_name in hyp.set_names:
            if hyp.sets_to_run[set_name]:
                set_nums.append(hyp.set_nums[set_name])
                set_names.append(set_name)
                set_seqlens.append(hyp.seqlens[set_name])
                set_batch_sizes.append(hyp.batch_sizes[set_name])
                set_inputs.append(self.all_inputs[set_name])
                set_writers.append(
                    SummaryWriter(self.log_dir + '/' + set_name,
                                  max_queue=1000000,
                                  flush_secs=1000000))
                set_log_freqs.append(hyp.log_freqs[set_name])
                set_do_backprops.append(hyp.sets_to_backprop[set_name])
                set_dicts.append({})
                set_loaders.append(iter(set_inputs[-1]))

        if hyp.do_test:
            all_ious = np.zeros([hyp.max_iters, hyp.S_test], np.float32)
            test_count = 0

        for step in range(self.start_iter + 1, hyp.max_iters + 1):
            for i, set_input in enumerate(set_inputs):
                # restart the loader after each epoch; this is a no-op for the tfrecord loader
                if step % len(set_input) == 0:
                    set_loaders[i] = iter(set_input)
            for (set_num, set_name, set_seqlen, set_batch_size, set_input,
                 set_writer, set_log_freq, set_do_backprop, set_dict,
                 set_loader) in zip(set_nums, set_names, set_seqlens,
                                    set_batch_sizes, set_inputs, set_writers,
                                    set_log_freqs, set_do_backprops, set_dicts,
                                    set_loaders):

                log_this = np.mod(step, set_log_freq) == 0
                total_time, read_time, iter_time = 0.0, 0.0, 0.0

                output_dict = dict()

                if log_this or set_do_backprop or hyp.do_test:
                    # print('%s: set_num %d; log_this %d; set_do_backprop %d; ' % (set_name, set_num, log_this, set_do_backprop))
                    # print('log_this = %s' % log_this)
                    # print('set_do_backprop = %s' % set_do_backprop)

                    read_start_time = time.time()

                    feed = next(set_loader)
                    feed_cuda = {}
                    for k in feed:
                        try:
                            feed_cuda[k] = feed[k].cuda(non_blocking=True)
                        except AttributeError:
                            # some things are not tensors (e.g., filename)
                            feed_cuda[k] = feed[k]
                    read_time = time.time() - read_start_time

                    feed_cuda['writer'] = set_writer
                    feed_cuda['global_step'] = step
                    feed_cuda['set_num'] = set_num
                    feed_cuda['set_log_freq'] = set_log_freq
                    feed_cuda['set_name'] = set_name
                    feed_cuda['set_seqlen'] = set_seqlen
                    feed_cuda['set_batch_size'] = set_batch_size

                    iter_start_time = time.time()
                    if set_do_backprop:
                        self.model.train()
                        loss, results, returned_early = self.model(feed_cuda)
                    else:
                        self.model.eval()
                        with torch.no_grad():
                            loss, results, returned_early = self.model(
                                feed_cuda)
                    loss_py = loss.cpu().item()

                    if hyp.do_test and (not returned_early):
                        ious = results['ious']
                        ious = ious[0].cpu().numpy()
                        all_ious[test_count] = ious
                        test_count += 1
                        # print('all_ious', all_ious[:test_count])
                        mean_ious = np.mean(all_ious[:test_count], axis=0)
                        print('mean_ious', mean_ious)

                    if (not returned_early) and set_do_backprop and hyp.lr > 0:

                        self.optimizer.zero_grad()
                        loss.backward()
                        self.optimizer.step()

                    if hyp.do_emb3D:

                        def update_slow_network(slow_net,
                                                fast_net,
                                                beta=0.999):
                            # EMA update: slow <- beta * slow + (1 - beta) * fast
                            param_k = slow_net.state_dict()
                            param_q = fast_net.named_parameters()
                            for n, q in param_q:
                                if n in param_k:
                                    param_k[n].data.copy_(beta *
                                                          param_k[n].data +
                                                          (1 - beta) * q.data)
                            slow_net.load_state_dict(param_k)

                        update_slow_network(self.model.featnet3D_slow,
                                            self.model.featnet3D)

                    iter_time = time.time() - iter_start_time
                    total_time = time.time() - self.start_time

                    print(
                        "%s; [%4d/%4d]; ttime: %.0f (%.2f, %.2f); loss: %.3f (%s)"
                        % (hyp.name, step, hyp.max_iters, total_time,
                           read_time, iter_time, loss_py, set_name))
                    if log_this:
                        # if log_this and (not returned_early):
                        set_writer.flush()

            if hyp.do_save_outputs:
                out_fn = '%s_output_dict.npy' % (hyp.name)
                np.save(out_fn, output_dict)
                print('saved %s' % out_fn)

            if np.mod(step, hyp.snap_freq) == 0 and hyp.lr > 0:
                saverloader.save(self.model, self.checkpoint_dir, step,
                                 self.optimizer)

        for writer in set_writers:  # close writers to flush cached events to disk
            writer.close()

        if hyp.do_test:
            mean_ious = np.mean(all_ious[:test_count], axis=0)
            print('mean_ious', mean_ious)
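
The update_slow_network helper above is an exponential-moving-average ("momentum") update: a frozen slow copy of the network tracks the trained fast one, in the spirit of momentum encoders. A standalone sketch with toy nn.Linear modules (the networks and beta value are illustrative, and this variant walks parameters() where the original walks the state_dict, which also covers buffers):

import torch
import torch.nn as nn

fast_net = nn.Linear(4, 4)
slow_net = nn.Linear(4, 4)
slow_net.load_state_dict(fast_net.state_dict())  # start the slow copy in sync

@torch.no_grad()
def ema_update(slow_net, fast_net, beta=0.999):
    # slow <- beta * slow + (1 - beta) * fast, parameter by parameter
    for p_slow, p_fast in zip(slow_net.parameters(), fast_net.parameters()):
        p_slow.mul_(beta).add_(p_fast, alpha=1 - beta)

ema_update(slow_net, fast_net)  # called once per training step, after the optimizer step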
Code example #3
    def go(self):
        self.start_time = time.time()
        self.initialize_model()
        print("------ Done creating models ------")

        if hyp.lr > 0:
            params_to_optimize = self.model.parameters()
            self.optimizer = torch.optim.Adam(params_to_optimize, lr=hyp.lr)
        else:
            self.optimizer = None

        self.start_iter = saverloader.load_weights(self.model, self.optimizer)

        # init slow params with fast params
        if hyp.do_emb2d:
            self.model.feat2dnet_slow.load_state_dict(
                self.model.feat2dnet.state_dict())
        if hyp.do_emb3d:
            self.model.feat3dnet_slow.load_state_dict(
                self.model.feat3dnet.state_dict())

        print("------ Done loading weights ------")

        set_nums = []
        set_names = []
        set_batch_sizes = []
        set_data_formats = []
        set_seqlens = []
        set_inputs = []
        set_writers = []
        set_log_freqs = []
        set_do_backprops = []
        set_dicts = []
        set_loaders = []

        for set_name in hyp.set_names:
            if hyp.sets_to_run[set_name]:
                set_nums.append(hyp.set_nums[set_name])
                set_data_formats.append(hyp.data_formats[set_name])
                set_seqlens.append(hyp.seqlens[set_name])
                set_names.append(set_name)
                set_batch_sizes.append(hyp.batch_sizes[set_name])
                set_inputs.append(self.all_inputs[set_name])
                set_writers.append(
                    SummaryWriter(self.log_dir + '/' + set_name,
                                  max_queue=MAX_QUEUE,
                                  flush_secs=60))
                set_log_freqs.append(hyp.log_freqs[set_name])
                set_do_backprops.append(hyp.sets_to_backprop[set_name])
                set_dicts.append({})
                set_loaders.append(iter(set_inputs[-1]))

        for step in range(self.start_iter + 1, hyp.max_iters + 1):
            # reset set_loader after each epoch
            for i, set_input in enumerate(set_inputs):
                if step % len(set_input) == 0:
                    set_loaders[i] = iter(set_input)
            for (set_num, set_data_format, set_seqlen, set_name,
                 set_batch_size, set_input, set_writer, set_log_freq,
                 set_do_backprop, set_dict,
                 set_loader) in zip(set_nums, set_data_formats, set_seqlens,
                                    set_names, set_batch_sizes, set_inputs,
                                    set_writers, set_log_freqs,
                                    set_do_backprops, set_dicts, set_loaders):
                log_this = np.mod(step, set_log_freq) == 0
                total_time, read_time, iter_time = 0.0, 0.0, 0.0

                output_dict = dict()

                if log_this or set_do_backprop or hyp.do_test:
                    # print('%s: set_num %d; set_data_format %s; set_seqlen %s; log_this %d; set_do_backprop %d; ' % (
                    #     set_name, set_num, set_data_format, set_seqlen, log_this, set_do_backprop))

                    read_start_time = time.time()
                    feed, data_ind = next(set_loader)
                    feed_cuda = {}
                    for k in feed:
                        try:
                            feed_cuda[k] = feed[k].cuda(non_blocking=True)
                        except AttributeError:
                            # some things are not tensors (e.g., filename)
                            feed_cuda[k] = feed[k]

                    read_time = time.time() - read_start_time
                    feed_cuda['writer'] = set_writer
                    feed_cuda['global_step'] = step
                    feed_cuda['set_num'] = set_num
                    feed_cuda['set_log_freq'] = set_log_freq
                    feed_cuda['set_data_format'] = set_data_format
                    feed_cuda['set_seqlen'] = set_seqlen
                    feed_cuda['set_name'] = set_name
                    feed_cuda['set_batch_size'] = set_batch_size

                    iter_start_time = time.time()
                    if set_do_backprop:
                        self.model.train()
                        loss, results, returned_early = self.model(feed_cuda)
                    else:
                        self.model.eval()
                        with torch.no_grad():
                            loss, results, returned_early = self.model(
                                feed_cuda)
                    loss_py = loss.cpu().item()

                    if (not returned_early) and set_do_backprop and hyp.lr > 0:
                        self.optimizer.zero_grad()
                        loss.backward()
                        self.optimizer.step()

                    def update_slow_network(slow_net, fast_net, beta=0.999):
                        # EMA update: slow <- beta * slow + (1 - beta) * fast
                        param_k = slow_net.state_dict()
                        param_q = fast_net.named_parameters()
                        for n, q in param_q:
                            if n in param_k:
                                param_k[n].data.copy_(beta * param_k[n].data +
                                                      (1 - beta) * q.data)
                        slow_net.load_state_dict(param_k)

                    if hyp.do_emb3d:
                        update_slow_network(self.model.feat3dnet_slow,
                                            self.model.feat3dnet)

                    if hyp.do_emb2d:
                        update_slow_network(self.model.feat2dnet_slow,
                                            self.model.feat2dnet)

                    iter_time = time.time() - iter_start_time
                    total_time = time.time() - self.start_time

                    print(
                        "%s; [%4d/%4d]; ttime: %.0f (%.2f, %.2f); loss: %.3f (%s)"
                        % (hyp.name, step, hyp.max_iters, total_time,
                           read_time, iter_time, loss_py, set_name))

            if np.mod(step, hyp.snap_freq) == 0 and hyp.lr > 0:
                saverloader.save(self.model, self.checkpoint_dir, step,
                                 self.optimizer)

        for writer in set_writers:  # close writers to flush cached events to disk
            writer.close()
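
Both training loops above drive everything off a single global step counter and rebuild each DataLoader iterator once an epoch's worth of steps has passed, instead of nesting an epoch loop. A stripped-down sketch of that pattern (the dataset and sizes are toy stand-ins):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10, dtype=torch.float32).unsqueeze(1))
loader = DataLoader(dataset, batch_size=2)  # len(loader) == 5 batches per epoch
it = iter(loader)

for step in range(1, 13):
    if step % len(loader) == 0:  # an epoch's worth of steps has passed; rebuild the iterator
        it = iter(loader)
    (batch,) = next(it)  # never raises StopIteration because the iterator is refreshed in time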
Code example #4
    def go(self):
        self.start_time = time.time()
        self.infer()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=hyp.lr, weight_decay=hyp.weight_decay)
        print("------ Done creating models ------")
        self.start_iter = saverloader.load_weights(self.model, self.optimizer)
        if hyp.self_improve_once or hyp.filter_boxes:
            self.model.detnet_target.load_state_dict(self.model.detnet.state_dict())

        if hyp.moc:
            self.init_model_k(self.model, self.model_key)

        if hyp.sets_to_run["test"]:
            self.start_iter = 0 
        print("------ Done loading weights ------")

        set_nums = []
        set_names = []
        set_inputs = []
        set_writers = []
        set_log_freqs = []
        set_do_backprops = []
        set_dicts = []
        set_loaders = []


        for set_name in hyp.set_names:
            if hyp.sets_to_run[set_name]:
                set_nums.append(hyp.set_nums[set_name])
                set_names.append(set_name)
                set_inputs.append(self.all_inputs[set_name])
                set_writers.append(SummaryWriter(self.log_dir + '/' + set_name, max_queue=MAX_QUEUE, flush_secs=60))
                set_log_freqs.append(hyp.log_freqs[set_name])
                set_do_backprops.append(hyp.sets_to_backprop[set_name])
                set_dicts.append({})
                set_loaders.append(iter(set_inputs[-1]))

        for step in range(self.start_iter+1, hyp.max_iters+1):
            for i, set_input in enumerate(set_inputs):
                # restart the loader after each epoch; this is a no-op for the tfrecord loader
                if step % len(set_input) == 0:
                    set_loaders[i] = iter(set_input)

            for (set_num, set_name, set_input, set_writer, set_log_freq,
                 set_do_backprop, set_dict,
                 set_loader) in zip(set_nums, set_names, set_inputs,
                                    set_writers, set_log_freqs,
                                    set_do_backprops, set_dicts, set_loaders):

                log_this = np.mod(step, set_log_freq) == 0
                total_time, read_time, iter_time = 0.0, 0.0, 0.0

                if log_this or set_do_backprop or hyp.break_constraint:
                    read_start_time = time.time()

                    feed = next(set_loader)

                    feed_cuda = {}
                    if hyp.do_clevr_sta:
                        tree_seq_filename = feed.pop('tree_seq_filename')
                        filename_e = feed.pop('filename_e')
                        filename_g = feed.pop('filename_g')
                    if hyp.dataset_name in ("replica", "carla_mix", "clevr_vqa", "carla_det"):
                        if hyp.debug_match or hyp.do_match_det:
                            feed['classes'] = np.array(feed['classes']).reshape([hyp.B * 2, hyp.N])
                        classes = feed.pop('classes')

                    for k in feed:
                        # feed_cuda[k] = feed[k].to(self.device)
                        if hyp.typeVal == "content" or hyp.debug_match or hyp.do_match_det:
                            feed_cuda[k] = feed[k].cuda(non_blocking=True).float().squeeze(0)
                        else:
                            feed_cuda[k] = feed[k].cuda(non_blocking=True).float()

                    # feed_cuda = next(iter(set_input))
                    read_time = time.time() - read_start_time

                    if hyp.do_clevr_sta:
                        if hyp.typeVal == "content" or hyp.debug_match or hyp.do_match_det:
                            feed_cuda['tree_seq_filename'] = np.array(tree_seq_filename).squeeze(1)
                        else:
                            feed_cuda['tree_seq_filename'] = tree_seq_filename
                        feed_cuda['filename_e'] = filename_e
                        feed_cuda['filename_g'] = filename_g
                    
                    if hyp.dataset_name in ("replica", "carla_mix", "clevr_vqa", "carla_det"):
                        if not hyp.debug_match and not hyp.do_match_det:
                            classes = np.transpose(np.array(classes))
                        feed_cuda['classes'] = classes
                        feed_cuda['filename_e'] = filename_e
                        feed_cuda['filename_g'] = filename_g

                    feed_cuda['writer'] = set_writer
                    feed_cuda['global_step'] = step
                    feed_cuda['set_num'] = set_num
                    feed_cuda['set_name'] = set_name
                    iter_start_time = time.time()

                    if hyp.typeVal == "content" or hyp.debug_match or hyp.do_match_det:
                        hyp.B = hyp.B * 2  # batches are stacked in pairs here; undone after the forward pass
                    if set_do_backprop:
                        start_time = time.time()
                        self.model.train()
                        loss, results = self.model(feed_cuda)
                        if hyp.profile_time:
                            print("forward pass time", time.time() - start_time)
                    else:
                        self.model.eval()
                        with torch.no_grad():
                            loss, results = self.model(feed_cuda)
                        if hyp.halucinate_vals != 1:
                            # hal_num is defined in surrounding context not shown in this snippet
                            if hal_num == 0:
                                maps = []
                                filenames = []

                            maps.append(results['maps'])
                            filenames.append(results['filenames'])

                            if (hal_num + 1) == hyp.halucinate_vals:
                                maps_avg = np.mean(np.stack(maps), axis=0)
                                for ind, overlap in enumerate(results['ious']):
                                    results['summ'].summ_scalar('ap_avg/%.2f_iou' % overlap, maps_avg[ind])

                    if hyp.typeVal == "content" or hyp.debug_match or hyp.do_match_det:
                        # undo the batch-size doubling applied before the forward pass
                        hyp.B = hyp.B // 2

                    loss_vis = loss.cpu().item()

                    summ_writer = utils_improc.Summ_writer(writer=feed_cuda['writer'],
                                                           global_step=feed_cuda['global_step'],
                                                           set_name=feed_cuda['set_name'],
                                                           fps=8)
                    summ_writer.summ_scalar('loss', loss_vis)

                    if set_do_backprop:
                        if hyp.accumulate_grad:
                            # normalize the loss so the accumulated gradient matches a full batch
                            loss = loss / hyp.accumulation_steps
                            loss.backward()
                            # step the optimizer only once every accumulation_steps iterations
                            if step % hyp.accumulation_steps == 0:
                                self.optimizer.step()
                                self.optimizer.zero_grad()
                        else:
                            self.optimizer.zero_grad()
                            loss.backward()
                            self.optimizer.step()

                    iter_time = time.time() - iter_start_time
                    total_time = time.time() - self.start_time

                    print("%s; [%4d/%4d]; ttime: %.0f (%.2f, %.2f); loss: %.3f (%s)" % (hyp.name,
                                                                                        step,
                                                                                        hyp.max_iters,
                                                                                        total_time,
                                                                                        read_time,
                                                                                        iter_time,
                                                                                        loss_vis,
                                                                                        set_name))
                if np.mod(step, hyp.snap_freq) == 0:
                    saverloader.save(self.model, self.checkpoint_dir, step, self.optimizer)

        for writer in set_writers:  # close writers to flush cached events to disk
            writer.close()
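
The accumulate_grad branch in this last example is plain gradient accumulation: each per-step loss is scaled by 1 / accumulation_steps, backward() runs every step so gradients sum in param.grad, and the optimizer steps only once per accumulation_steps iterations. A self-contained sketch (the model, data, and constants are toy stand-ins for the hyp.* settings):

import torch
import torch.nn as nn

model = nn.Linear(8, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
accumulation_steps = 4

for step in range(1, 17):
    x = torch.randn(2, 8)
    loss = model(x).pow(2).mean() / accumulation_steps  # scale so the summed gradient matches one large batch
    loss.backward()                                     # gradients accumulate in param.grad
    if step % accumulation_steps == 0:
        optimizer.step()                                # apply the accumulated gradient
        optimizer.zero_grad()                           # clear for the next accumulation window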