Exemple #1
0
 def get(self):
     """Handle GET: log the request path and reply with a generic error.

     NOTE(review): the original bound ``self.request.arguments`` and an
     empty ``error_code`` but never used either; both unused locals were
     removed. The hard-coded "306" response text is preserved verbatim.
     """
     path = self.request.path
     # Lazy %-formatting: the message is only built if INFO is enabled.
     logger.info("the path is : %s", path)
     self.write("Please contact Admin.error code: 306")
Exemple #2
0
    def select_highest_voted_to_chain(self):
        """Merge all top-voted ticks into the chain, then restart the cycle.

        WARNING: more than one winner MUST occur less than 50% of the time,
        so the chain only branches occasionally and does not grow into an
        exponential tree — this is the core condition for network-wide
        consensus.
        """
        ref_list = self.top_tick_refs()

        winning_ticks = self.get_ticks_by_ref(ref_list)

        # TODO: Should always be > 0 but sometimes not, when unsynced...
        if winning_ticks:
            logger.debug("Top tick refs with " +
                         str(len(winning_ticks[0]['list'])) + " pings each:" +
                         str(ref_list))

            merged = {}
            for winning_tick in winning_ticks:
                merged.update(self.json_tick_to_chain_tick(winning_tick))

            # TODO: Is this atomic?
            if self.chain.full():
                self.chain.get()  # evict the earliest queued chain entry

            self.chain.put(merged)
        else:
            logger.info("Warning!! No ticks added to chain!!")

        self.restart_cycle()
Exemple #3
0
    def send_mutual_add_requests(self, peerslist):
        """Send signed /mutual_add requests to every new peer in *peerslist*.

        Returns the number of peers that accepted the request (HTTP 201).
        A 503 means the remote peer is already at its peer-maximum.
        """
        successful_adds = 0
        for peer in peerslist:
            # Skip peers we already know, and stop once we are over capacity.
            if peer in self.peers or len(self.peers) > config['max_peers']:
                continue

            payload = {"port": self.port, 'pubkey': credentials.pubkey}
            # Sign the payload *before* the signature key is added to it.
            sig = sign(standard_encode(payload), credentials.privkey)
            payload['signature'] = sig

            status_code = None
            response = None
            result, success = attempt(requests.post,
                                      False,
                                      url=peer + '/mutual_add',
                                      json=payload,
                                      timeout=config['timeout'])
            if not success:
                logger.debug("Couldn't connect to " + peer)
            else:
                status_code = result.status_code
                response = result.text

            if status_code == 201:
                logger.info("Adding peer " + str(peer))
                self.register_peer(peer, response)
                successful_adds += 1
            elif status_code == 503:
                logger.info("Peer was at peer-maximum")

        return successful_adds
Exemple #4
0
    def _build_net(self):
        """Instantiate the configured network, move it to GPU(s) and
        optionally load pretrained weights (sets ``self.is_pretrain``)."""
        # psmnet/ganet constructors take only maxdisp; the rest take kwargs.
        if self.net_name in ("psmnet", "ganet"):
            self.net = build_net(self.net_name)(self.maxdisp)
        else:
            self.net = build_net(self.net_name)(batchNorm=False,
                                                lastRelu=True,
                                                maxdisp=self.maxdisp)

        self.is_pretrain = False

        # Wrap in DataParallel only when more than one GPU is requested.
        if self.ngpu > 1:
            self.net = torch.nn.DataParallel(self.net,
                                             device_ids=self.devices).cuda()
        else:
            self.net.cuda()

        if self.pretrain == '':
            logger.info('Initial a new model...')
        elif not os.path.isfile(self.pretrain):
            logger.warning('Can not find the specific model %s, initial a new model...', self.pretrain)
        else:
            model_data = torch.load(self.pretrain)
            logger.info('Load pretrain model: %s', self.pretrain)
            # Checkpoints may wrap the weights under a 'state_dict' key.
            if 'state_dict' in model_data.keys():
                self.net.load_state_dict(model_data['state_dict'])
            else:
                self.net.load_state_dict(model_data)
            self.is_pretrain = True
 def _parse_nodered_flow(self):
     """Find the 'object-detect' node in the Node-RED flow.

     On success stores that node in ``self.config``, resolves its sources
     and returns True; otherwise falls through (implicitly returns None).
     """
     flow = get_flow()
     if not flow:
         return
     for f in flow['flows']:
         if f['type'] != 'object-detect':
             continue
         self.config = f
         logger.info(f'Parsed flow - {f}')
         self._get_sources(flow)
         return True
Exemple #6
0
    def _prepare_dataset(self):
        """Build the train/test datasets and data loaders.

        Derives the disp/norm/angle prediction flags from the net name,
        records focal length and image/scale sizes from the datasets, and
        creates ``self.train_loader`` / ``self.test_loader``.

        Raises:
            ValueError: if ``self.dataset`` is not one of the supported
                names (the original fell through to a NameError instead).
        """
        # Disparity is always predicted; only dispnormnet adds normals.
        self.disp_on = True
        self.norm_on = self.net_name in ["dispnormnet"]
        self.angle_on = False

        if self.dataset == 'irs':
            train_dataset = SIRSDataset(txt_file=self.trainlist,
                                        root_dir=self.datapath,
                                        phase='train',
                                        load_disp=self.disp_on,
                                        load_norm=self.norm_on,
                                        to_angle=self.angle_on)
            test_dataset = SIRSDataset(txt_file=self.vallist,
                                       root_dir=self.datapath,
                                       phase='test',
                                       load_disp=self.disp_on,
                                       load_norm=self.norm_on,
                                       to_angle=self.angle_on)
        elif self.dataset == 'sceneflow':
            train_dataset = SceneFlowDataset(txt_file=self.trainlist,
                                             root_dir=self.datapath,
                                             phase='train')
            test_dataset = SceneFlowDataset(txt_file=self.vallist,
                                            root_dir=self.datapath,
                                            phase='test')
        elif self.dataset == 'sintel':
            train_dataset = SintelDataset(txt_file=self.trainlist,
                                          root_dir=self.datapath,
                                          phase='train')
            test_dataset = SintelDataset(txt_file=self.vallist,
                                         root_dir=self.datapath,
                                         phase='test')
        else:
            # Fail fast with a clear message instead of a NameError below.
            raise ValueError("Unknown dataset: %s" % self.dataset)

        self.fx, self.fy = train_dataset.get_focal_length()
        self.img_height, self.img_width = train_dataset.get_img_size()
        self.scale_height, self.scale_width = test_dataset.get_scale_size()

        datathread = 4
        if os.environ.get('datathread') is not None:
            datathread = int(os.environ.get('datathread'))
        logger.info("Use %d processes to load data..." % datathread)
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=datathread,
                                       pin_memory=True)
        # BUG FIX: was `self.batch_size / 4` (float), but DataLoader's
        # batch_size must be an int — use floor division as elsewhere.
        self.test_loader = DataLoader(test_dataset,
                                      batch_size=self.batch_size // 4,
                                      shuffle=False,
                                      num_workers=datathread,
                                      pin_memory=True)
        self.num_batches_per_epoch = len(self.train_loader)
Exemple #7
0
def verify_token_step(request_hander, user_id_required, arguments, use_cookie,
                      site, exclude_sites_list):
    """Validate the caller's user token when user identification is required.

    Writes an error response and returns early when the token is missing
    (error 304) or fails verification (303). ``site`` and
    ``exclude_sites_list`` are accepted for interface compatibility but are
    unused here.

    BUG FIXES vs. original: the duplicated ``"token" in arguments`` test
    made the missing-token error branch unreachable, and ``user_token``
    could be referenced while unbound (NameError).
    """
    if user_id_required != "True":
        logger.info("user token  not  required")
        return

    user_token = None
    if use_cookie != "True":
        if "token" in arguments:
            user_token = request_hander.get_argument("token")
        else:
            logger.error("can not get user token")
            request_hander.write("Please contact Admin.error code: 304")
            return
    # NOTE(review): when use_cookie == "True" no token is extracted here
    # (the original crashed with NameError); user_token stays None so
    # token_verify fails gracefully — confirm the intended cookie path.

    if token_verify(user_token) is True:  # verify token against user store
        logger.info("user token verify succeed")
    else:
        logger.error("user token verify failed:{}".format(user_token))
        request_hander.write("用户超时或者在其它方登录(303)")
        return
Exemple #8
0
    def _prepare_dataset(self):
        """Create train/test datasets for the selected dataset name and set
        up the data loaders plus image-geometry attributes."""
        if self.dataset in ('sceneflow', 'irs'):
            train_dataset = SceneFlowDataset(txt_file=self.trainlist,
                                             root_dir=self.datapath,
                                             phase='train',
                                             load_disp=self.disp_on,
                                             load_norm=self.norm_on)
            test_dataset = SceneFlowDataset(txt_file=self.vallist,
                                            root_dir=self.datapath,
                                            phase='test',
                                            load_disp=self.disp_on,
                                            load_norm=self.norm_on)
        elif self.dataset == 'middlebury':
            train_dataset = MiddleburyDataset(txt_file=self.trainlist,
                                              root_dir=self.datapath,
                                              phase='train')
            test_dataset = MiddleburyDataset(txt_file=self.vallist,
                                             root_dir=self.datapath,
                                             phase='test')
        elif self.dataset == 'sintel':
            train_dataset = SintelDataset(txt_file=self.trainlist,
                                          root_dir=self.datapath,
                                          phase='train')
            test_dataset = SintelDataset(txt_file=self.vallist,
                                         root_dir=self.datapath,
                                         phase='test')

        # Geometry is read from the test split.
        self.img_size = test_dataset.get_img_size()
        self.scale_size = test_dataset.get_scale_size()
        self.focal_length = test_dataset.get_focal_length()

        # Worker count defaults to 4, overridable via the env var.
        num_workers = 4
        env_threads = os.environ.get('datathread')
        if env_threads is not None:
            num_workers = int(env_threads)
        logger.info("Use %d processes to load data..." % num_workers)
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=num_workers,
                                       pin_memory=True)
        self.test_loader = DataLoader(test_dataset,
                                      batch_size=self.batch_size,
                                      shuffle=False,
                                      num_workers=num_workers,
                                      pin_memory=True)
        self.num_batches_per_epoch = len(self.train_loader)
Exemple #9
0
    def _build_net(self):
        """Construct the network named by ``self.net_name``, move it onto
        the GPU(s), and load pretrained weights when ``self.pretrain``
        points to an existing checkpoint file."""
        # Each architecture family expects a different constructor signature.
        if self.net_name == "psmnet":
            net = build_net(self.net_name)(self.maxdisp)
        elif self.net_name in ["normnets"]:
            net = build_net(self.net_name)()
        else:
            net = build_net(self.net_name)(batchNorm=False,
                                           lastRelu=True,
                                           maxdisp=self.maxdisp)
        self.net = net

        # These nets additionally need the camera focal length.
        if self.net_name in ['dnfusionnet', 'dtonnet']:
            self.net.set_focal_length(self.focal_length[0],
                                      self.focal_length[1])

        self.is_pretrain = False

        if self.ngpu >= 1:
            self.net = torch.nn.DataParallel(self.net,
                                             device_ids=self.devices).cuda()
        else:
            self.net.cuda()

        if self.pretrain == '':
            logger.info('Initial a new model...')
        elif not os.path.isfile(self.pretrain):
            logger.warning(
                'Can not find the specific model %s, initial a new model...',
                self.pretrain)
        else:
            model_data = torch.load(self.pretrain)
            logger.info('Load pretrain model: %s', self.pretrain)
            # Checkpoints wrap weights under 'state_dict' or 'model', or
            # store the raw state dict directly.
            for wrapper_key in ('state_dict', 'model'):
                if wrapper_key in model_data.keys():
                    self.net.load_state_dict(model_data[wrapper_key])
                    break
            else:
                self.net.load_state_dict(model_data)
            self.is_pretrain = True
Exemple #10
0
    def _get_sources(self, flow):
        """Register every node wired into this object-detect node as a source.

        Each source node gets a 'roi_list' computed from its camera-feed
        ancestor's ROIs, and a matching slot is appended to
        ``self._last_saved_time``.
        """
        def __find_parent_cf_node(node):
            # Walk upstream through the wires until a camera-feed node is hit.
            if node['type'] == 'camera-feed':
                return node
            for candidate in flow['flows']:
                wires = candidate.get('wires')
                if (wires and node['id'] in wires[0]
                        and candidate['type'] != 'object-detect'):
                    return __find_parent_cf_node(candidate)

        for f in flow['flows']:
            # Only nodes wired directly into our configured node count.
            if not f.get('wires') or self.config['id'] not in f['wires'][0]:
                continue
            cf_node = __find_parent_cf_node(f)
            logger.info(
                f'Found a parent & source CF node - {f["id"]} & {cf_node["id"]}'
            )
            f['roi_list'] = get_contour_list(
                rois=cf_node.get('rois', []),
                width=cf_node['frame_width'],
                height=cf_node['frame_height'])
            self.sources.append(f)
            self._last_saved_time.append(0)
Exemple #11
0
def load_and_cache_examples(args, processor, data_type='train'):
    """Return examples for *data_type*, reading from an on-disk cache.

    In distributed runs only rank 0 builds the cache; the other ranks wait
    on a barrier and then load the cached file.
    """
    if args.local_rank not in [-1, 0]:
        # Block until the first process has materialised the dataset cache.
        torch.distributed.barrier()
    cache_path = os.path.join(args.data_path, f'cached_crf-{data_type}')
    if os.path.exists(cache_path):
        logger.info("Loading features from cached file %s", cache_path)
        examples = torch.load(cache_path)
    else:
        logger.info("Creating features from dataset file at %s",
                    args.data_path)
        if data_type == 'train':
            examples = processor.get_train_examples()
        elif data_type == 'dev':
            examples = processor.get_dev_examples()
        else:
            examples = processor.get_test_examples()
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cache_path)
            torch.save(examples, str(cache_path))
    if args.local_rank == 0:
        # Release the waiting non-zero ranks now that the cache exists.
        torch.distributed.barrier()
    return examples
Exemple #12
0
    def _prepare_dataset(self):
        """Set up SceneFlow train/test datasets, image geometry fields and
        the corresponding data loaders."""
        if self.dataset == 'sceneflow':
            train_dataset = SceneFlowDataset(txt_file=self.trainlist,
                                             root_dir=self.datapath,
                                             phase='train')
            test_dataset = SceneFlowDataset(txt_file=self.vallist,
                                            root_dir=self.datapath,
                                            phase='test')

        self.img_height, self.img_width = train_dataset.get_img_size()
        self.scale_height, self.scale_width = test_dataset.get_scale_size()

        # Worker count defaults to 16, overridable via the env var.
        num_workers = 16
        env_threads = os.environ.get('datathread')
        if env_threads is not None:
            num_workers = int(env_threads)
        logger.info("Use %d processes to load data..." % num_workers)
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=num_workers,
                                       pin_memory=True)
        # Evaluation runs at a quarter of the training batch size.
        self.test_loader = DataLoader(test_dataset,
                                      batch_size=self.batch_size // 4,
                                      shuffle=False,
                                      num_workers=num_workers,
                                      pin_memory=True)
        self.num_batches_per_epoch = len(self.train_loader)
Exemple #13
0
    def train_one_epoch(self, epoch):
        """Train ``self.net`` for one epoch over ``self.train_loader``.

        Adjusts the learning rate for *epoch*, then for every batch builds
        the 6-channel stereo input, computes a net-specific loss (dispatched
        on ``self.net_name``), backpropagates and steps the optimizer.

        Returns:
            tuple: (average loss, average disparity EPE) for the epoch.
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        flow2_EPEs = AverageMeter()
        norm_EPEs = AverageMeter()
        angle_EPEs = AverageMeter()
        # switch to train mode
        self.net.train()
        end = time.time()
        cur_lr = self.adjust_learning_rate(epoch)
        logger.info("learning rate of epoch %d: %f." % (epoch, cur_lr))

        for i_batch, sample_batched in enumerate(self.train_loader):

            # Left/right views are concatenated channel-wise into one input.
            left_input = torch.autograd.Variable(
                sample_batched['img_left'].cuda(), requires_grad=False)
            right_input = torch.autograd.Variable(
                sample_batched['img_right'].cuda(), requires_grad=False)
            input = torch.cat((left_input, right_input), 1)

            # Move the ground-truth targets that this run needs onto the GPU.
            if self.disp_on:
                target_disp = sample_batched['gt_disp']
                target_disp = target_disp.cuda()
                target_disp = torch.autograd.Variable(target_disp,
                                                      requires_grad=False)
            if self.norm_on:
                if self.angle_on:
                    target_angle = sample_batched['gt_angle']
                    target_angle = target_angle.cuda()
                    target_angle = torch.autograd.Variable(target_angle,
                                                           requires_grad=False)
                else:
                    target_norm = sample_batched['gt_norm']
                    target_norm = target_norm.cuda()
                    target_norm = torch.autograd.Variable(target_norm,
                                                          requires_grad=False)

            input_var = torch.autograd.Variable(input, requires_grad=False)
            data_time.update(time.time() - end)

            self.optimizer.zero_grad()
            # Each architecture returns a different output structure, so the
            # loss/EPE computation is dispatched per net name below.
            if self.net_name in ['dtonnet', 'dnfusionnet']:
                # Joint disparity + surface-normal prediction.
                disp_norm = self.net(input_var)

                disps = disp_norm[0]
                normal = disp_norm[1]
                #print("gt norm[%f-%f], predict norm[%f-%f]." % (torch.min(target_norm).data.item(), torch.max(target_norm).data.item(), torch.min(normal).data.item(), torch.max(normal).data.item()))
                loss_disp = self.criterion(disps, target_disp)
                # Only supervise normals where the target is a valid unit range.
                valid_norm_idx = (target_norm >= -1.0) & (target_norm <= 1.0)
                # NOTE(review): F.mse_loss's size_average kwarg is deprecated
                # in modern PyTorch in favor of reduction= — confirm version.
                loss_norm = F.mse_loss(normal[valid_norm_idx],
                                       target_norm[valid_norm_idx],
                                       size_average=True) * 3.0
                loss = loss_disp + loss_norm

                final_disp = disps[0]

                flow2_EPE = self.epe(final_disp, target_disp)
                norm_EPE = loss_norm

            elif self.net_name in ["normnets"]:
                # Normal-only prediction: no disparity loss in this branch.
                normal = self.net(input_var)
                #print("gt norm[%f-%f], predict norm[%f-%f]." % (torch.min(target_norm).data.item(), torch.max(target_norm).data.item(), torch.min(normal).data.item(), torch.max(normal).data.item()))
                valid_norm_idx = (target_norm >= -1.0) & (target_norm <= 1.0)
                loss_norm = F.mse_loss(normal[valid_norm_idx],
                                       target_norm[valid_norm_idx],
                                       size_average=True) * 3.0
                #print(loss_disp, loss_norm)
                loss = loss_norm
                norm_EPE = loss_norm
                # NOTE(review): flow2_EPE is never assigned in this branch;
                # if self.disp_on is True the update below raises NameError.

            elif self.net_name == "fadnet":
                # Two stacked refinement networks, both supervised.
                output_net1, output_net2 = self.net(input_var)
                loss_net1 = self.criterion(output_net1, target_disp)
                loss_net2 = self.criterion(output_net2, target_disp)
                loss = loss_net1 + loss_net2
                output_net2_final = output_net2[0]
                flow2_EPE = self.epe(output_net2_final, target_disp)
            elif self.net_name == "dispnetcss":
                # Three cascaded networks, all supervised.
                output_net1, output_net2, output_net3 = self.net(input_var)
                loss_net1 = self.criterion(output_net1, target_disp)
                loss_net2 = self.criterion(output_net2, target_disp)
                loss_net3 = self.criterion(output_net3, target_disp)
                loss = loss_net1 + loss_net2 + loss_net3
                output_net3_final = output_net3[0]
                flow2_EPE = self.epe(output_net3_final, target_disp)
            elif self.net_name == "psmnet":
                # Mask out disparities beyond the supported maximum.
                mask = target_disp < self.maxdisp
                mask.detach_()

                output1, output2, output3 = self.net(input_var)
                output1 = torch.unsqueeze(output1, 1)
                output2 = torch.unsqueeze(output2, 1)
                output3 = torch.unsqueeze(output3, 1)

                # Weighted sum over the three intermediate outputs.
                loss = 0.5 * F.smooth_l1_loss(
                    output1[mask], target_disp[mask],
                    size_average=True) + 0.7 * F.smooth_l1_loss(
                        output2[mask], target_disp[mask], size_average=True
                    ) + F.smooth_l1_loss(
                        output3[mask], target_disp[mask], size_average=True)
                flow2_EPE = self.epe(output3, target_disp)
            elif self.net_name == "gwcnet":
                mask = target_disp < self.maxdisp
                mask.detach_()

                output1, output2, output3, output4 = self.net(input_var)

                # Weighted sum over the four intermediate outputs; note the
                # EPE is computed on output3, not the final output4.
                loss = 0.5 * F.smooth_l1_loss(
                    output1[mask], target_disp[mask],
                    size_average=True) + 0.5 * F.smooth_l1_loss(
                        output2[mask], target_disp[mask], size_average=True
                    ) + 0.7 * F.smooth_l1_loss(
                        output3[mask], target_disp[mask], size_average=True
                    ) + F.smooth_l1_loss(
                        output4[mask], target_disp[mask], size_average=True)
                flow2_EPE = self.epe(output3, target_disp)
            else:
                # Generic path: criterion may return a list/tuple of losses.
                output = self.net(input_var)
                loss = self.criterion(output, target_disp)
                if type(loss) is list or type(loss) is tuple:
                    loss = np.sum(loss)
                if type(output) is list or type(output) is tuple:
                    flow2_EPE = self.epe(output[0], target_disp)
                else:
                    flow2_EPE = self.epe(output, target_disp)

            # record loss and EPE
            losses.update(loss.data.item(), input_var.size(0))
            if self.disp_on:
                flow2_EPEs.update(flow2_EPE.data.item(), input_var.size(0))
            if self.norm_on:
                if self.angle_on:
                    # NOTE(review): angle_EPE is never assigned anywhere in
                    # this function — this line raises NameError when both
                    # norm_on and angle_on are set. Confirm intended source.
                    angle_EPEs.update(angle_EPE.data.item(), input_var.size(0))
                else:
                    norm_EPEs.update(norm_EPE.data.item(), input_var.size(0))

            # compute gradient and do SGD step
            loss.backward()
            self.optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Periodic progress report every 10 batches.
            if i_batch % 10 == 0:
                logger.info(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                    'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
                    'EPE {flow2_EPE.val:.3f} ({flow2_EPE.avg:.3f})\t'
                    'norm_EPE {norm_EPE.val:.3f} ({norm_EPE.avg:.3f})\t'
                    'angle_EPE {angle_EPE.val:.3f} ({angle_EPE.avg:.3f})'.
                    format(epoch,
                           i_batch,
                           self.num_batches_per_epoch,
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=losses,
                           flow2_EPE=flow2_EPEs,
                           norm_EPE=norm_EPEs,
                           angle_EPE=angle_EPEs))

            #if i_batch > 20:
            #    break

        return losses.avg, flow2_EPEs.avg
Exemple #14
0
def predict(args, processor, model, prefix=""):
    """Run CRF-NER inference on the test set and write two output files.

    Writes ``test_prediction.json`` (one tag-sequence record per example)
    and ``test_submit.json`` (entities grouped per input text) under
    ``args.output_dir``/``prefix``.

    Args:
        args: run configuration; must expose output_dir, local_rank,
            eval_max_seq_len, device and data_path.
        processor: supplies the examples and the idx2label mapping.
        model: trained model exposing a ``crf`` attribute for decoding.
        prefix: optional sub-directory / log tag for this run.
    """
    pred_output_dir = args.output_dir
    if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(pred_output_dir)
    test_dataset = NER_dataset(
        load_and_cache_examples(args, processor, data_type='test'),
        args.eval_max_seq_len)
    # Note that DistributedSampler samples randomly
    test_sampler = SequentialSampler(
        test_dataset) if args.local_rank == -1 else DistributedSampler(
            test_dataset)
    # batch_size is fixed at 1 so each output record maps 1:1 to an example.
    test_dataloader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 batch_size=1,
                                 collate_fn=collate_fn)
    # Eval!
    logger.info("***** Running prediction %s *****", prefix)
    logger.info("  Num examples = %d", len(test_dataset))
    logger.info("  Batch size = %d", 1)
    results = []
    output_predict_file = os.path.join(pred_output_dir, prefix,
                                       "test_prediction.json")

    # Unwrap DataParallel so model.crf is reachable below.
    if isinstance(model, nn.DataParallel):
        model = model.module
    for step, batch in enumerate(test_dataloader):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "input_mask": batch[1],
                "labels": None,  # inference only — no loss computed
                'input_lens': batch[3]
            }
            outputs = model(**inputs)
            logits = outputs[0]
            tags = model.crf.decode(logits, inputs['input_mask'])
            tags = tags.squeeze(0).cpu().numpy().tolist()
        preds = tags[0]
        # Decode BIOS-tagged spans into (tag, start, end) entities.
        label_entities = get_entity_bios(preds, processor.idx2label)
        json_d = {}
        json_d['id'] = step
        json_d['tag_seq'] = " ".join([processor.idx2label[x] for x in preds])
        json_d['entities'] = label_entities
        results.append(json_d)
    logger.info("\n")
    with open(output_predict_file, "w") as writer:
        for record in results:
            writer.write(json.dumps(record) + '\n')
    output_submit_file = os.path.join(pred_output_dir, prefix,
                                      "test_submit.json")
    test_text = []
    with open(os.path.join(args.data_path, "test.json"), 'r') as fr:
        for line in fr:
            test_text.append(json.loads(line))
    # Re-attach predicted entities to the raw text, building
    # {tag: {word: [[start, end], ...]}} per example.
    test_submit = []
    for x, y in zip(test_text, results):
        json_d = {}
        json_d['id'] = x['id']
        json_d['label'] = {}
        entities = y['entities']
        words = list(x['text'])
        if len(entities) != 0:
            for subject in entities:
                tag = subject[0]
                start = subject[1]
                end = subject[2]
                word = "".join(words[start:end + 1])  # end index is inclusive
                if tag in json_d['label']:
                    if word in json_d['label'][tag]:
                        json_d['label'][tag][word].append([start, end])
                    else:
                        json_d['label'][tag][word] = [[start, end]]
                else:
                    json_d['label'][tag] = {}
                    json_d['label'][tag][word] = [[start, end]]
        test_submit.append(json_d)
    json_to_text(output_submit_file, test_submit)
Exemple #15
0
def evaluate(args, processor, model, prefix="", show_entity_info=False):
    """Evaluate a CRF-NER model on the dev set.

    Runs the model over the dev dataloader, accumulates sequence-level
    entity metrics via SeqEntityScore, and returns a dict of metric values
    plus the average eval loss under the 'loss' key.

    Args:
        args: run configuration (local_rank, n_gpu, device, batch sizes...).
        processor: supplies examples and the idx2label mapping.
        model: model exposing a ``crf`` attribute for decoding.
        prefix: log tag for this evaluation run.
        show_entity_info: when True, additionally log per-entity metrics.
    """
    metric = SeqEntityScore(processor.idx2label)
    eval_dataset = NER_dataset(load_and_cache_examples(args,
                                                       processor,
                                                       data_type='dev'),
                               max_seq_len=args.eval_max_seq_len)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(
        eval_dataset) if args.local_rank == -1 else DistributedSampler(
            eval_dataset)
    eval_dataloader = DataLoader(eval_dataset,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size,
                                 collate_fn=collate_fn)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    # Unwrap DataParallel so model.crf is reachable below.
    if isinstance(model, nn.DataParallel):
        model = model.module
    for step, batch in enumerate(eval_dataloader):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "input_mask": batch[1],
                "labels": batch[2],
                'input_lens': batch[3]
            }
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            tags = model.crf.decode(logits, inputs['input_mask'])
        if args.n_gpu > 1:
            # mean() to average on multi-gpu parallel evaluating
            tmp_eval_loss = tmp_eval_loss.mean()
        eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        out_label_ids = inputs['labels'].cpu().numpy().tolist()
        input_lens = inputs['input_lens'].cpu().numpy().tolist()
        tags = tags.squeeze(0).cpu().numpy().tolist()
        # Feed each sequence (truncated to its real length) to the scorer.
        for i, label in enumerate(out_label_ids):
            temp_1 = []  # gold label path
            temp_2 = []  # predicted label path
            for j, m in enumerate(label):
                if j == input_lens[i] - 1:
                    # Last position marks sequence end: score and stop.
                    metric.update(pred_paths=[temp_2], label_paths=[temp_1])
                    break
                else:
                    temp_1.append(processor.idx2label[out_label_ids[i][j]])
                    temp_2.append(processor.idx2label[tags[i][j]])
    eval_loss = eval_loss / nb_eval_steps
    eval_info, entity_info = metric.result()
    results = {f'{key}': value for key, value in eval_info.items()}
    results['loss'] = eval_loss
    info = "Eval results" + "-".join(
        [f' {key}: {value:.4f} ' for key, value in results.items()])
    logger.info(info)
    if show_entity_info:
        logger.info("***** Entity results %s *****", prefix)
        for key in sorted(entity_info.keys()):
            logger.info("******* %s results ********" % key)
            info = "-".join([
                f' {key}: {value:.4f} '
                for key, value in entity_info[key].items()
            ])
            logger.info(info)
    return results
Exemple #16
0
def on_mqtt_connected(*args):
    """MQTT on-connect callback: log the connection arguments."""
    message = f'Connected to the MQTT broker - {args}'
    logger.info(message)
Exemple #17
0
import traceback

import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.process
import tornado.web

from utils.common import config
from utils.common import logger


def make_app():
    """Build the Tornado application with its URL routing table."""
    routes = [
        (r"/", tabtran.MainHandler),
        (r"/.*", tabtran.ViewHandler),
    ]
    return tornado.web.Application(routes)


if __name__ == "__main__":
    # Read service settings from the shared configuration.
    port = config['common']['port']
    thread = config['common']['thread']
    app = make_app()
    logger.info("Start to run tornado.... ")
    try:
        # Bind the listening sockets first, then fork worker processes so
        # every child shares them (standard Tornado multi-process setup).
        sockets = tornado.netutil.bind_sockets(port)
        tornado.process.fork_processes(int(thread))
        server = tornado.httpserver.HTTPServer(app)
        server.add_sockets(sockets)
        logger.info("Tornado is running.... ")
        tornado.ioloop.IOLoop.instance().start()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; log the failure with its full traceback.
        logger.info("Tornado cant be started.... ")
        traceback.print_exc()
Exemple #18
0
    def train_one_epoch(self, epoch):
        """Run one training epoch over ``self.train_loader``.

        Builds the network input by channel-concatenating the stereo pair,
        computes the loss appropriate to ``self.net_name`` (single- or
        multi-stage disparity networks), backpropagates, and steps the
        optimizer once per batch.

        Args:
            epoch: Epoch index, used to adjust the learning rate and in logs.

        Returns:
            Tuple ``(losses.avg, flow2_EPEs.avg)``: mean loss and mean
            end-point-error over the epoch.
        """
        # Running meters for timing and metrics.
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        flow2_EPEs = AverageMeter()
        # switch to train mode
        self.net.train()
        end = time.time()
        cur_lr = self.adjust_learning_rate(epoch)
        logger.info("learning rate of epoch %d: %f." % (epoch, cur_lr))
        for i_batch, sample_batched in enumerate(self.train_loader):

            # Batches are dicts with 'img_left', 'img_right', 'gt_disp'.
            left_input = torch.autograd.Variable(sample_batched['img_left'].cuda(), requires_grad=False)
            right_input = torch.autograd.Variable(sample_batched['img_right'].cuda(), requires_grad=False)
            target = sample_batched['gt_disp']
            #left_input = torch.autograd.Variable(sample_batched[0].cuda(), requires_grad=False)
            #right_input = torch.autograd.Variable(sample_batched[1].cuda(), requires_grad=False)
            #target = sample_batched[2]

            # Stereo pair concatenated along the channel dimension.
            input = torch.cat((left_input, right_input), 1)
            target = target.cuda()

            input_var = torch.autograd.Variable(input, requires_grad=False)
            target_var = torch.autograd.Variable(target, requires_grad=False)
            data_time.update(time.time() - end)

            self.optimizer.zero_grad()
            if self.net_name == "dispnetcres":
                # Two-stage net: sum per-stage losses, EPE from final stage.
                output_net1, output_net2 = self.net(input_var)
                loss_net1 = self.criterion(output_net1, target_var)
                loss_net2 = self.criterion(output_net2, target_var)
                loss = loss_net1 + loss_net2
                output_net2_final = output_net2[0]
                flow2_EPE = self.epe(output_net2_final, target_var)
            elif self.net_name == "dispnetcs":
                output_net1, output_net2 = self.net(input_var)
                loss_net1 = self.criterion(output_net1, target_var)
                loss_net2 = self.criterion(output_net2, target_var)
                loss = loss_net1 + loss_net2
                output_net2_final = output_net2[0]
                flow2_EPE = self.epe(output_net2_final, target_var)
            elif self.net_name == "dispnetcss":
                # Three-stage variant of the same scheme.
                output_net1, output_net2, output_net3 = self.net(input_var)
                loss_net1 = self.criterion(output_net1, target_var)
                loss_net2 = self.criterion(output_net2, target_var)
                loss_net3 = self.criterion(output_net3, target_var)
                loss = loss_net1 + loss_net2 + loss_net3
                output_net3_final = output_net3[0]
                flow2_EPE = self.epe(output_net3_final, target_var)
            elif self.net_name == "psmnet" or self.net_name == "ganet":
                # Only supervise disparities below the configured maximum.
                mask = target_var < self.maxdisp
                mask.detach_()

                output1, output2, output3 = self.net(input_var)
                output1 = torch.unsqueeze(output1,1)
                output2 = torch.unsqueeze(output2,1)
                output3 = torch.unsqueeze(output3,1)

                # NOTE(review): size_average= is deprecated in modern PyTorch;
                # the equivalent is reduction='mean'.
                loss = 0.5*F.smooth_l1_loss(output1[mask], target_var[mask], size_average=True) + 0.7*F.smooth_l1_loss(output2[mask], target_var[mask], size_average=True) + F.smooth_l1_loss(output3[mask], target_var[mask], size_average=True)
                flow2_EPE = self.epe(output3, target_var)
            else:
                output = self.net(input_var)
                loss = self.criterion(output, target_var)
                if type(loss) is list or type(loss) is tuple:
                    # NOTE(review): np.sum on a list of tensors — presumably
                    # relies on tensor __add__; sum(loss) would be clearer.
                    loss = np.sum(loss)
                if type(output) is list or type(output) is tuple:
                    flow2_EPE = self.epe(output[0], target_var)
                else:
                    flow2_EPE = self.epe(output, target_var)

            # record loss and EPE
            losses.update(loss.data.item(), target.size(0))
            flow2_EPEs.update(flow2_EPE.data.item(), target.size(0))

            # compute gradient and do SGD step
            loss.backward()
            self.optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Log progress every 10 batches.
            if i_batch % 10 == 0:
                logger.info('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
                  'EPE {flow2_EPE.val:.3f} ({flow2_EPE.avg:.3f})'.format(
                  epoch, i_batch, self.num_batches_per_epoch, batch_time=batch_time, 
                  data_time=data_time, loss=losses, flow2_EPE=flow2_EPEs))

            #if i_batch > 10:
            #    break

        return losses.avg, flow2_EPEs.avg
Exemple #19
0
                                    np.array([w, h, w, h])).astype(
                                        np.int).tolist()
                                cv2.rectangle(frame, (_x, _y),
                                              (_x + _w, _y + _h), (0, 255, 0),
                                              2)
                                cv2.putText(
                                    frame,
                                    f'{r["label"]} {round(r["confidence"] * 100, 1)} %',
                                    (_x, _y - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (0, 255, 0), 1)
                            cv2.imwrite(f_name, frame)
                            self._last_saved_time[vid_src_id] = time.time()


def on_mqtt_connected(*args):
    """Connection callback for the MQTT client; records the event."""
    message = f'Connected to the MQTT broker - {args}'
    logger.info(message)


def on_mqtt_message(*args):
    """Message callback for the MQTT client.

    The topic and payload live in args[2] (``.topic`` /
    ``.payload.decode('utf-8')``) but are currently only logged raw.
    """
    message = f'Received a message - {args}'
    logger.info(message)


if __name__ == '__main__':

    # Entry point: construct the detector service and run its main loop.
    # Fixed typo in the startup banner ("Staring" -> "Starting").
    logger.info('========== Starting Viso OD Service ==========')

    od = VisoODService()
    od.start()
Exemple #20
0
    def run(self):
        """Main loop of the object-detection service.

        Steps:
          1. Download and parse the NodeRed flow, retrying until timeout.
          2. Download the detection model, retrying until timeout.
          3. Instantiate the detector backend matching the configured
             device / model combination.
          4. Loop forever: read frames from Redis, alternate detection and
             tracking, filter results by ROI, publish matches over MQTT and
             optionally save annotated snapshots to disk.
        """
        # Download the flow from NodeRed container and parse.
        s_time = time.time()
        while True:
            if self._parse_nodered_flow():
                break
            else:
                # NODERED_FLOW_TIMEOUT presumably expresses minutes (it is
                # multiplied by 60) — TODO confirm.
                if time.time() - s_time > NODERED_FLOW_TIMEOUT * 60:
                    logger.critical('Failed to download flow, exiting...')
                    return
                else:
                    time.sleep(1)

        # Fetch the model; a custom URL is used only for non-public models.
        s_time = time.time()
        model_name = self.config.get('model_name')
        detect_mode = str(self.config.get('detect_mode', '')).lower()
        model_url = self.config.get('custom_model_url')
        while True:
            model_dir = download_model(
                model_name=model_name,
                device=detect_mode,
                model_url=model_url
                if self.config.get('public_model', True) is False else None)
            if model_dir:
                break
            else:
                # NOTE(review): message typo — "existing" should be "exiting".
                if time.time() - s_time > DOWNLOAD_MODEL_TIMEOUT * 60:
                    logger.critical('Failed to download model, existing...')
                    return
                else:
                    time.sleep(.1)

        # Pick the detector implementation; imports are deferred so only
        # the backend actually needed gets loaded.
        if detect_mode == 'gpu':
            if model_name == 'yolov3':
                from utils.object_detect_gpu_yolov3 import VisoGPUODYoloV3
                detector = VisoGPUODYoloV3(model_dir=model_dir)
            else:
                from utils.object_detect_gpu import VisoGPUOD
                detector = VisoGPUOD(model_dir=model_dir)
        else:
            if model_name == 'yolov3':
                from utils.openvino_detect_yolov3 import OpenVINODetectYOLOV3
                detector = OpenVINODetectYOLOV3(
                    model_dir=model_dir,
                    device='MYRIAD' if detect_mode == 'ncs' else 'CPU')
            else:
                if model_url:
                    from utils.object_detect_gpu import VisoGPUOD
                    detector = VisoGPUOD(model_dir=model_dir)
                else:
                    from utils.openvino_detect import OpenVinoObjectDetect
                    detector = OpenVinoObjectDetect(
                        model_dir=model_dir,
                        device='MYRIAD' if detect_mode == 'ncs' else 'CPU')

        tracking_mode = str(self.config.get('tracking_algorithm')).upper()
        tracking_quality = float(self.config.get('tracking_quality', 5))
        tracking_cycle = int(self.config.get('tracking_cycle', 2))

        # One tracker and one frame counter per video source.
        trackers = []
        cnts = []
        for vid_src_id in range(len(self.sources)):
            trackers.append(
                ObjectTrack(trk_type=tracking_mode,
                            good_track_quality=tracking_quality))
            cnts.append(0)

        logger.info("Starting detection loop...")
        r = redis.StrictRedis()
        while True:
            for vid_src_id, src in enumerate(self.sources):
                # Frames are published to Redis as base64-encoded JPEGs.
                str_frame = r.get(f"{REDIS_PREFIX}_{src.get('id')}")
                if str_frame:
                    str_frame = base64.b64decode(str_frame)
                    # NOTE(review): np.fromstring is deprecated;
                    # np.frombuffer is the modern equivalent.
                    frame = cv2.imdecode(
                        np.fromstring(str_frame, dtype=np.uint8), -1)
                    h, w = frame.shape[:2]
                    # Run the (expensive) detector every `tracking_cycle`
                    # frames; cheaper tracker updates in between.
                    if cnts[vid_src_id] % tracking_cycle == 0:
                        result = detector.detect_frame(frame)
                        filtered_objects = [
                            r for r in result
                            if r['label'] in self.config.get('labels', [])
                        ]
                        # FIXME: Remove this!
                        if filtered_objects:
                            logger.debug(filtered_objects)
                        cnts[vid_src_id] = 0
                        trackers[vid_src_id].upgrade_trackers(
                            dets=filtered_objects, trk_img=frame)
                    else:
                        trackers[vid_src_id].keep_trackers(trk_img=frame)
                    cnts[vid_src_id] += 1

                    result = trackers[vid_src_id].to_list()

                    # Keep a detection when no ROI is configured, or when
                    # its box center falls inside any ROI polygon.
                    roi_result = [
                        x for x in result if not src.get('roi_list', [])
                        or  # ROI is not defined?
                        any([
                            cv2.pointPolygonTest(
                                np.array([cnt], dtype=np.int32),
                                ((x['rect'][0] + x['rect'][2] // 2) * w,
                                 (x['rect'][1] + x['rect'][3] // 2) *
                                 h),  # Center point
                                False) >= 0 for cnt in src.get('roi_list', [])
                        ])
                    ]
                    if roi_result:
                        logger.info(
                            f'Detected Object from {src.get("id")}({src.get("name")}) - {roi_result}'
                        )
                        # Publish the hit on this service's MQTT topic.
                        client.publish(
                            topic=f"{MQTT_PREFIX}_{self.config['id']}",
                            payload=json.dumps({
                                "camera_id": src.get("id"),
                                "result": roi_result
                            }))
                        # Save an annotated snapshot at most once a minute
                        # per source.
                        if SAVE_IMAGE and time.time(
                        ) - self._last_saved_time[vid_src_id] > 60:
                            f_name = os.path.join(
                                SAVE_PATH,
                                f"{src.get('id')}_{datetime.datetime.now().isoformat()}.jpg"
                            )
                            logger.debug(f"Saving to a file - {f_name}")
                            for r in roi_result:
                                # Rects are stored normalized; scale back to
                                # pixel coordinates.
                                # NOTE(review): np.int was removed in
                                # NumPy 1.24; use int or np.int32.
                                _x, _y, _w, _h = (
                                    np.array(r['rect']) *
                                    np.array([w, h, w, h])).astype(
                                        np.int).tolist()
                                cv2.rectangle(frame, (_x, _y),
                                              (_x + _w, _y + _h), (0, 255, 0),
                                              2)
                                cv2.putText(
                                    frame,
                                    f'{r["label"]} {round(r["confidence"] * 100, 1)} %',
                                    (_x, _y - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (0, 255, 0), 1)
                            cv2.imwrite(f_name, frame)
                            self._last_saved_time[vid_src_id] = time.time()
Exemple #21
0
    def validate(self):
        """Evaluate the network on ``self.test_loader``.

        Depending on ``self.net_name``, computes disparity and/or surface
        normal predictions, rescales them to the target resolution, and
        accumulates end-point-error (EPE) metrics.  NaN metric values are
        skipped via the ``x == x`` idiom.

        Returns:
            Average disparity EPE (``flow2_EPEs.avg``) over the test set.
        """
        # Running meters for timing and the various error metrics.
        batch_time = AverageMeter()
        flow2_EPEs = AverageMeter()
        norm_EPEs = AverageMeter()
        angle_EPEs = AverageMeter()
        losses = AverageMeter()
        # switch to evaluate mode
        end = time.time()
        # Counters for the fraction of normals within `angle_thres` degrees.
        valid_norm = 0
        angle_lt = 0
        angle_thres = 11.25
        self.net.eval()
        for i, sample_batched in enumerate(self.test_loader):

            # Resize both images to the network's input resolution.
            left_input = sample_batched['img_left'].cuda()
            right_input = sample_batched['img_right'].cuda()
            left_input = F.interpolate(left_input,
                                       self.scale_size,
                                       mode='bilinear')
            right_input = F.interpolate(right_input,
                                        self.scale_size,
                                        mode='bilinear')

            # Stereo pair concatenated along the channel dimension.
            input_var = torch.cat((left_input, right_input), 1)
            #input_var = torch.autograd.Variable(inputs, requires_grad=False)

            # Load whichever ground truths the active heads need.
            if self.disp_on:
                target_disp = sample_batched['gt_disp']
                target_disp = target_disp.cuda()
                target_disp = torch.autograd.Variable(target_disp,
                                                      requires_grad=False)
            if self.norm_on:
                if self.angle_on:
                    target_angle = sample_batched['gt_angle']
                    target_angle = target_angle.cuda()
                    target_angle = torch.autograd.Variable(target_angle,
                                                           requires_grad=False)
                else:
                    target_norm = sample_batched['gt_norm']
                    target_norm = target_norm.cuda()
                    target_norm = torch.autograd.Variable(target_norm,
                                                          requires_grad=False)

            if self.net_name in ['dnfusionnet', 'dtonnet']:
                # Joint disparity + normal network.
                with torch.no_grad():
                    disp, normal = self.net(input_var)

                # scale the result
                disp_norm = torch.cat((normal, disp), 1)

                # upsampling the predicted disparity map
                size = target_disp.size()
                disp_norm = scale_norm(disp_norm,
                                       (size[0], 4, size[-2], size[-1]), True)

                disp = disp_norm[:, 3, :, :].unsqueeze(1)
                normal = disp_norm[:, :3, :, :]

                # Ground-truth normals outside [-1, 1] are invalid.
                valid_norm_idx = (target_norm >= -1.0) & (target_norm <= 1.0)

                # NOTE(review): size_average= is deprecated in modern
                # PyTorch; the equivalent is reduction='mean'.
                norm_EPE = F.mse_loss(normal[valid_norm_idx],
                                      target_norm[valid_norm_idx],
                                      size_average=True) * 3.0

                flow2_EPE = self.epe(disp, target_disp)
                norm_angle = angle_diff_norm(normal, target_norm).squeeze()

                # A pixel is valid only if all three normal channels are.
                valid_angle_idx = valid_norm_idx[:,
                                                 0, :, :] & valid_norm_idx[:,
                                                                           1, :, :] & valid_norm_idx[:,
                                                                                                     2, :, :]
                valid_angle_idx = valid_angle_idx.squeeze()

                angle_EPE = torch.mean(norm_angle[valid_angle_idx])

                valid_norm += float(torch.sum(valid_angle_idx))
                angle_lt += float(
                    torch.sum(norm_angle[valid_angle_idx] < angle_thres))

                logger.info('percent of < {}: {}.'.format(
                    angle_thres, angle_lt * 1.0 / valid_norm))

                # NOTE(review): this overwrites the masked angle_EPE above
                # with an unmasked mean — confirm which is intended.
                angle_EPE = torch.mean(norm_angle)
                loss = norm_EPE + flow2_EPE

            elif self.net_name in ["normnets"]:
                # Normal-only network.
                normal = self.net(input_var)
                size = normal.size()

                # scale the result
                normal = scale_norm(
                    normal, (size[0], 3, self.img_height, self.img_width),
                    True)

                valid_norm_idx = (target_norm >= -1.0) & (target_norm <= 1.0)
                norm_EPE = F.mse_loss(normal[valid_norm_idx],
                                      target_norm[valid_norm_idx],
                                      size_average=True) * 3.0

                norm_angle = angle_diff_norm(normal, target_norm).squeeze()
                valid_angle_idx = valid_norm_idx[:,
                                                 0, :, :] & valid_norm_idx[:,
                                                                           1, :, :] & valid_norm_idx[:,
                                                                                                     2, :, :]
                valid_angle_idx = valid_angle_idx.squeeze()

                angle_EPE = torch.mean(norm_angle[valid_angle_idx])
                valid_norm += float(torch.sum(valid_angle_idx))
                angle_lt += float(
                    torch.sum(norm_angle[valid_angle_idx] < angle_thres))

                logger.info('percent of < {}: {}.'.format(
                    angle_thres, angle_lt * 1.0 / valid_norm))
                loss = norm_EPE
            elif self.net_name == 'fadnet':
                # Two-stage disparity network; rescale both outputs.
                output_net1, output_net2 = self.net(input_var)
                output_net1 = scale_disp(output_net1,
                                         (output_net1.size()[0],
                                          self.img_size[0], self.img_size[1]))
                output_net2 = scale_disp(output_net2,
                                         (output_net2.size()[0],
                                          self.img_size[0], self.img_size[1]))

                loss_net1 = self.epe(output_net1, target_disp)
                loss_net2 = self.epe(output_net2, target_disp)
                loss = loss_net1 + loss_net2
                flow2_EPE = self.epe(output_net2, target_disp)
            elif self.net_name == "psmnet" or self.net_name == "gwcnet":
                with torch.no_grad():
                    output_net3 = self.net(input_var)
                # NOTE(review): `.dim` without parentheses is a bound method,
                # so this comparison is always False; likely meant
                # `output_net3.dim() == 3`.
                if output_net3.dim == 3:
                    output_net3 = output_net3.unsqueeze(1)
                output_net3 = scale_disp(output_net3,
                                         (output_net3.size()[0],
                                          self.img_size[0], self.img_size[1]))
                loss = self.epe(output_net3, target_disp)
                flow2_EPE = loss
            elif self.net_name == 'dispnetcss':
                # Three-stage disparity network; sum per-stage EPE losses.
                output_net1, output_net2, output_net3 = self.net(input_var)
                output_net1 = scale_disp(output_net1,
                                         (output_net1.size()[0],
                                          self.img_size[0], self.img_size[1]))
                output_net2 = scale_disp(output_net2,
                                         (output_net2.size()[0],
                                          self.img_size[0], self.img_size[1]))
                output_net3 = scale_disp(output_net3,
                                         (output_net3.size()[0],
                                          self.img_size[0], self.img_size[1]))

                loss_net1 = self.epe(output_net1, target_disp)
                loss_net2 = self.epe(output_net2, target_disp)
                loss_net3 = self.epe(output_net3, target_disp)
                loss = loss_net1 + loss_net2 + loss_net3
                flow2_EPE = self.epe(output_net3, target_disp)
            else:
                # Default: single-output disparity network.
                output = self.net(input_var)[0]

                output = scale_disp(
                    output,
                    (output.size()[0], self.img_size[0], self.img_size[1]))
                loss = self.epe(output, target_disp)
                flow2_EPE = loss

            # record loss and EPE
            # `x == x` is False only for NaN: skip NaN metric values.
            if loss.data.item() == loss.data.item():
                losses.update(loss.data.item(), input_var.size(0))
            if self.disp_on and (flow2_EPE.data.item()
                                 == flow2_EPE.data.item()):
                flow2_EPEs.update(flow2_EPE.data.item(), input_var.size(0))
            if self.norm_on:
                if self.angle_on:
                    if (angle_EPE.data.item() == angle_EPE.data.item()):
                        angle_EPEs.update(angle_EPE.data.item(),
                                          input_var.size(0))
                else:
                    if (norm_EPE.data.item() == norm_EPE.data.item()):
                        norm_EPEs.update(norm_EPE.data.item(),
                                         input_var.size(0))
                    if (angle_EPE.data.item() == angle_EPE.data.item()):
                        angle_EPEs.update(angle_EPE.data.item(),
                                          input_var.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Log every batch (i % 1 == 0 is always true).
            if i % 1 == 0:
                logger.info(
                    'Test: [{0}/{1}]\t Time {2}\t EPE {3}\t norm_EPE {4}\t angle_EPE {5}'
                    .format(i, len(self.test_loader), batch_time.val,
                            flow2_EPEs.val, norm_EPEs.val, angle_EPEs.val))

        logger.info(' * EPE {:.3f}'.format(flow2_EPEs.avg))
        logger.info(' * normal EPE {:.3f}'.format(norm_EPEs.avg))
        logger.info(' * angle EPE {:.3f}'.format(angle_EPEs.avg))
        return flow2_EPEs.avg
Exemple #22
0
def train(args, processor, model):
    """Train a CRF-headed NER model.

    Builds the training DataLoader, sets up per-module parameter groups
    (transformer / CRF / output linear layer) with their own learning
    rates, then runs a standard PyTorch training loop with linear warmup
    scheduling.  After each epoch (on rank -1/0), evaluates and
    checkpoints the best model by F1 into ``args.output_dir/best_model``.

    Args:
        args: Namespace of hyper-parameters and runtime options.
        processor: Data processor providing examples and label maps.
        model: Model exposing ``transformer``, ``crf`` and ``out_fc``.

    Returns:
        Tuple ``(global_step, average_training_loss)``.

    Raises:
        ValueError: If ``args.optim`` is neither ``'sgd'`` nor ``'adam'``.
    """
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_dataset = NER_dataset(
        load_and_cache_examples(args, processor, data_type='train'),
        args.train_max_seq_len)
    train_sampler = RandomSampler(
        train_dataset) if args.local_rank == -1 else DistributedSampler(
            train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size,
                                  collate_fn=collate_fn)
    t_total = len(train_dataloader) * args.epoch

    # Separate parameter groups so the CRF and output layer can use a
    # different learning rate than the transformer body.
    transformer_param_optimizer = list(model.transformer.parameters())
    crf_param_optimizer = list(model.crf.parameters())
    linear_param_optimizer = list(model.out_fc.parameters())

    optimizer_grouped_parameters = [
        {
            'params': transformer_param_optimizer,
            'lr': args.learning_rate
        },
        {
            'params': crf_param_optimizer,
            'lr': args.crf_learning_rate
        },
        {
            'params': linear_param_optimizer,
            'lr': args.crf_learning_rate
        },
    ]
    args.warmup_steps = int(t_total * args.warmup_rate)
    # BUG FIX: the original compared against 'sgd:' (stray colon), so the
    # SGD branch was unreachable and 'sgd' crashed with a NameError below.
    if args.optim == 'sgd':
        optimizer = optim.SGD(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              momentum=args.momentum_rate)
    elif args.optim == 'adam':
        optimizer = optim.Adam(optimizer_grouped_parameters,
                               lr=args.learning_rate)
    else:
        # Fail fast instead of hitting a NameError on `optimizer` later.
        raise ValueError(f"Unsupported optimizer: {args.optim!r}")
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=t_total)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.epoch)
    logger.info("  Instantaneous batch size per GPU = %d",
                args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed) = %d",
        args.train_batch_size *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    steps_trained_in_current_epoch = 0

    best_f1 = 0.0
    tr_loss = 0.0
    model.zero_grad()
    # Added here for reproducibility (even between python 2 and 3)
    seed_everything(args.seed)
    for index in range(int(args.epoch)):
        for step, batch in enumerate(train_dataloader):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "input_mask": batch[1],
                "labels": batch[2],
                'input_lens': batch[3]
            }
            outputs = model(**inputs)
            # model outputs are always tuple in pytorch-transformers (see doc)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean(
                )  # mean() to average on multi-gpu parallel training
            loss.backward()
            tr_loss += loss.item()
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           args.max_grad_norm)
            # BUG FIX: step the optimizer before the LR scheduler; the
            # original order (scheduler first) skips the initial learning
            # rate and triggers a PyTorch warning.
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            if global_step % args.log_steps == 0:
                logger.info(
                    "training process —— epoch:%d —— global_step-%d —— loss-%.4f"
                    % (index + 1, global_step + 1, loss.item()))
            global_step += 1
        if args.local_rank in [-1, 0]:
            # Log metrics
            print(" ")
            if args.local_rank == -1:
                # Only evaluate when single GPU otherwise metrics may not average well
                eval_results = evaluate(args, processor, model)
                if eval_results['f1'] > best_f1:
                    logger.info(
                        f"\nEpoch {index+1}: eval_f1 improved from {best_f1} to {eval_results['f1']}"
                    )
                    output_dir = os.path.join(args.output_dir, "best_model")
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    torch.save(model_to_save.state_dict(),
                               os.path.join(output_dir, "pytorch_model.bin"))
                    torch.save(args,
                               os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    best_f1 = eval_results['f1']
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
    return global_step, tr_loss / global_step
Exemple #23
0
def on_mqtt_message(*args):
    """Catch-all MQTT callback: log every incoming message verbatim.

    Captures all positional callback arguments (client, userdata,
    message for a paho-style client) and logs them as a tuple; no
    per-field parsing is performed here.
    """
    # NOTE(review): topic/payload extraction was stubbed out by the
    # original author:
    #   topic = args[2].topic
    #   msg = args[2].payload.decode('utf-8')
    logger.info(f'Received a message - {args}')
Exemple #24
0
    def validate(self):
        """Run one evaluation pass over ``self.test_loader``.

        Puts the network in eval mode, computes the end-point error
        (EPE) between predicted and ground-truth disparity for every
        batch, and returns the running-average EPE over the test set.

        Returns:
            float: average EPE (``flow2_EPEs.avg``) across all batches.

        NOTE(review): outputs are rescaled to a hard-coded 540x960
        resolution for the named network variants — confirm this matches
        the dataset's ground-truth resolution.
        """
        batch_time = AverageMeter()
        flow2_EPEs = AverageMeter()
        losses = AverageMeter()
        # switch to evaluate mode
        self.net.eval()
        end = time.time()
        #scale_width = 960
        #scale_height = 540
        #scale_width = 3130
        #scale_height = 960
        for i, sample_batched in enumerate(self.test_loader):

            # Left/right views are concatenated channel-wise below into a
            # single network input.
            left_input = torch.autograd.Variable(sample_batched['img_left'].cuda(), requires_grad=False)
            right_input = torch.autograd.Variable(sample_batched['img_right'].cuda(), requires_grad=False)
            target = sample_batched['gt_disp']
            #left_input = torch.autograd.Variable(sample_batched[0].cuda(), requires_grad=False)
            #right_input = torch.autograd.Variable(sample_batched[1].cuda(), requires_grad=False)
            #target = sample_batched[2]

            input = torch.cat((left_input, right_input), 1)
            target = target.cuda()
            input_var = torch.autograd.Variable(input, requires_grad=False)
            target_var = torch.autograd.Variable(target, requires_grad=False)

            # Each architecture returns a different number of disparity
            # estimates: `loss` sums the EPE of every output, while
            # `flow2_EPE` tracks only the final (finest) output.
            if self.net_name == 'dispnetcres':
                output_net1, output_net2 = self.net(input_var)
                output_net1 = scale_disp(output_net1, (output_net1.size()[0], 540, 960))
                output_net2 = scale_disp(output_net2, (output_net2.size()[0], 540, 960))

                loss_net1 = self.epe(output_net1, target_var)
                loss_net2 = self.epe(output_net2, target_var)
                loss = loss_net1 + loss_net2
                flow2_EPE = self.epe(output_net2, target_var)
            elif self.net_name == "psmnet" or self.net_name == "ganet":
                # Single-output networks: loss and EPE coincide.
                output_net3 = self.net(input_var)
                output_net3 = scale_disp(output_net3, (output_net3.size()[0], 540, 960))
                loss = self.epe(output_net3, target_var)
                flow2_EPE = loss
            elif self.net_name == 'dispnetcss':
                output_net1, output_net2, output_net3 = self.net(input_var)
                output_net1 = scale_disp(output_net1, (output_net1.size()[0], 540, 960))
                output_net2 = scale_disp(output_net2, (output_net2.size()[0], 540, 960))
                output_net3 = scale_disp(output_net3, (output_net3.size()[0], 540, 960))

                loss_net1 = self.epe(output_net1, target_var)
                loss_net2 = self.epe(output_net2, target_var)
                loss_net3 = self.epe(output_net3, target_var)
                loss = loss_net1 + loss_net2 + loss_net3
                flow2_EPE = self.epe(output_net3, target_var)
            else:
                # Generic fallback: assume the network returns a sequence
                # and evaluate only its first output.
                output = self.net(input_var)
                output_net1 = output[0]
                #output_net1 = output_net1.squeeze(1)
                #print(output_net1.size())
                output_net1 = scale_disp(output_net1, (output_net1.size()[0], 540, 960))
                #output_net1 = torch.from_numpy(output_net1).unsqueeze(1).cuda()
                loss = self.epe(output_net1, target_var)
                flow2_EPE = self.epe(output_net1, target_var)

                #if type(loss) is list or type(loss) is tuple:
                #    loss = loss[0]
                #if type(output) is list or type(output_net1) :
                #    flow2_EPE = self.epe(output[0], target_var)
                #else:
                #    flow2_EPE = self.epe(output, target_var)

            # record loss and EPE; the self-comparison is a NaN filter
            # (NaN != NaN), so NaN batches don't poison the averages
            if loss.data.item() == loss.data.item():
                losses.update(loss.data.item(), target.size(0))
            if flow2_EPE.data.item() == flow2_EPE.data.item():
                flow2_EPEs.update(flow2_EPE.data.item(), target.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Periodic progress line every 10 batches.
            if i % 10 == 0:
                logger.info('Test: [{0}/{1}]\t Time {2}\t EPE {3}'
                      .format(i, len(self.test_loader), batch_time.val, flow2_EPEs.val))
        logger.info(' * EPE {:.3f}'.format(flow2_EPEs.avg))
        return flow2_EPEs.avg
Exemple #25
0
    def validate(self):
        """Evaluate disparity (and optionally surface-normal / angle)
        prediction over ``self.test_loader``.

        Puts the network in eval mode, accumulates per-batch losses and
        end-point errors into ``AverageMeter``s, and returns the
        average disparity EPE.

        Returns:
            float: average disparity EPE (``flow2_EPEs.avg``).

        NOTE(review): ``target_disp`` / ``target_norm`` / ``target_angle``
        are only bound when the corresponding ``disp_on`` / ``norm_on`` /
        ``angle_on`` flags are set; several branches below reference them
        unconditionally and would raise ``UnboundLocalError`` under other
        flag combinations — confirm the flags are constrained upstream.
        """
        batch_time = AverageMeter()
        flow2_EPEs = AverageMeter()
        norm_EPEs = AverageMeter()
        angle_EPEs = AverageMeter()
        losses = AverageMeter()
        # switch to evaluate mode
        self.net.eval()
        end = time.time()
        for i, sample_batched in enumerate(self.test_loader):

            left_input = torch.autograd.Variable(
                sample_batched['img_left'].cuda(), requires_grad=False)
            right_input = torch.autograd.Variable(
                sample_batched['img_right'].cuda(), requires_grad=False)

            # Stereo pair concatenated channel-wise into one input tensor.
            input = torch.cat((left_input, right_input), 1)
            input_var = torch.autograd.Variable(input, requires_grad=False)

            # Ground truths are loaded only for the enabled objectives.
            if self.disp_on:
                target_disp = sample_batched['gt_disp']
                target_disp = target_disp.cuda()
                target_disp = torch.autograd.Variable(target_disp,
                                                      requires_grad=False)
            if self.norm_on:
                if self.angle_on:
                    target_angle = sample_batched['gt_angle']
                    target_angle = target_angle.cuda()
                    target_angle = torch.autograd.Variable(target_angle,
                                                           requires_grad=False)
                else:
                    target_norm = sample_batched['gt_norm']
                    target_norm = target_norm.cuda()
                    target_norm = torch.autograd.Variable(target_norm,
                                                          requires_grad=False)

            if self.net_name in ["dispnormnet"]:
                # Joint disparity + normal head: scale both together,
                # then split channels back out (3 normal + 1 disparity).
                disp, normal = self.net(input_var)
                size = disp.size()

                # scale the result
                disp_norm = torch.cat((normal, disp), 1)
                disp_norm = scale_norm(
                    disp_norm, (size[0], 4, self.img_height, self.img_width),
                    True)
                disp = disp_norm[:, 3, :, :].unsqueeze(1)
                normal = disp_norm[:, :3, :, :]

                # normalize the surface normal
                #normal = normal / torch.norm(normal, 2, dim=1, keepdim=True)

                # Only unit-range normal components count as valid labels.
                valid_norm_idx = (target_norm >= -1.0) & (target_norm <= 1.0)

                norm_EPE = F.mse_loss(normal[valid_norm_idx],
                                      target_norm[valid_norm_idx],
                                      size_average=True) * 3.0

                flow2_EPE = self.epe(disp, target_disp)
                norm_angle = angle_diff_norm(normal, target_norm).squeeze()

                # A pixel is valid for the angle metric only if all three
                # normal channels are valid there.
                valid_angle_idx = valid_norm_idx[:,
                                                 0, :, :] & valid_norm_idx[:,
                                                                           1, :, :] & valid_norm_idx[:,
                                                                                                     2, :, :]
                valid_angle_idx = valid_angle_idx.squeeze()

                angle_EPE = torch.mean(norm_angle[valid_angle_idx])

                loss = norm_EPE + flow2_EPE

            elif self.net_name == 'dispnetcres':
                output_net1, output_net2 = self.net(input_var)
                output_net1 = scale_disp(output_net1,
                                         (output_net1.size()[0], 540, 960))
                output_net2 = scale_disp(output_net2,
                                         (output_net2.size()[0], 540, 960))

                loss_net1 = self.epe(output_net1, target_disp)
                loss_net2 = self.epe(output_net2, target_disp)
                loss = loss_net1 + loss_net2
                flow2_EPE = self.epe(output_net2, target_disp)
            elif self.net_name == 'dispnetcss':
                output_net1, output_net2, output_net3 = self.net(input_var)
                output_net1 = scale_disp(output_net1,
                                         (output_net1.size()[0], 540, 960))
                output_net2 = scale_disp(output_net2,
                                         (output_net2.size()[0], 540, 960))
                output_net3 = scale_disp(output_net3,
                                         (output_net3.size()[0], 540, 960))

                loss_net1 = self.epe(output_net1, target_disp)
                loss_net2 = self.epe(output_net2, target_disp)
                loss_net3 = self.epe(output_net3, target_disp)
                loss = loss_net1 + loss_net2 + loss_net3
                flow2_EPE = self.epe(output_net3, target_disp)
            else:
                # Generic fallback: evaluate the network's first output.
                output = self.net(input_var)
                output_net1 = output[0]
                output_net1 = scale_disp(
                    output_net1,
                    (output_net1.size()[0], self.img_height, self.img_width))
                loss = self.epe(output_net1, target_disp)
                flow2_EPE = self.epe(output_net1, target_disp)

            # record loss and EPE; the self-comparisons below are NaN
            # filters (NaN != NaN) so NaN batches don't skew the averages
            if loss.data.item() == loss.data.item():
                losses.update(loss.data.item(), input_var.size(0))
            if self.disp_on and (flow2_EPE.data.item()
                                 == flow2_EPE.data.item()):
                flow2_EPEs.update(flow2_EPE.data.item(), input_var.size(0))
            if self.norm_on:
                # NOTE(review): norm_EPE/angle_EPE are computed only in the
                # dispnormnet branch — other net_names with norm_on set
                # would hit an unbound name here; confirm upstream config.
                if self.angle_on:
                    if (angle_EPE.data.item() == angle_EPE.data.item()):
                        angle_EPEs.update(angle_EPE.data.item(),
                                          input_var.size(0))
                else:
                    if (norm_EPE.data.item() == norm_EPE.data.item()):
                        norm_EPEs.update(norm_EPE.data.item(),
                                         input_var.size(0))
                    if (angle_EPE.data.item() == angle_EPE.data.item()):
                        angle_EPEs.update(angle_EPE.data.item(),
                                          input_var.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Periodic progress line every 10 batches.
            if i % 10 == 0:
                logger.info(
                    'Test: [{0}/{1}]\t Time {2}\t EPE {3}\t norm_EPE {4}\t angle_EPE {5}'
                    .format(i, len(self.test_loader), batch_time.val,
                            flow2_EPEs.val, norm_EPEs.val, angle_EPEs.val))

        logger.info(' * EPE {:.3f}'.format(flow2_EPEs.avg))
        logger.info(' * normal EPE {:.3f}'.format(norm_EPEs.avg))
        logger.info(' * angle EPE {:.3f}'.format(angle_EPEs.avg))
        return flow2_EPEs.avg
Exemple #26
0
    def get(self):
        """Validate an incoming report URL and redirect to Tableau.

        Flow: read config -> locate the user token (cookie or query
        argument, per ``use_cookie``) -> verify the path prefix ->
        extract the Tableau site name -> verify CompanyId and the user
        token -> rebuild the query string (plus extra arguments from
        config/argu.json) -> fetch a trusted ticket and redirect.

        Any failed check writes a numbered error code to the response
        and returns early.
        """
        tableau_redirect_server = config['common']['tableau_redirect_server']
        user_id_required = config['user_verify']['user_id_required']
        company_id_required = config['common']['company_id_required']
        use_cookie = config['common']['use_cookie']
        cookie_path_start = config['common']['cookie_path_start']
        path_start = config['common']['path_start']
        cookie_token_names = config['common']['cookie_token_name']
        exclude_sites = config["user_verify"]["exclude_site"]
        exclude_sites_list = exclude_sites.split(",")

        arguments = self.request.arguments
        path = self.request.path
        logger.info(
            "--------------------------- Start to get Url request : ---------------------------"
        )
        logger.info("Url path is : " + path)

        # --- locate the user token and verify the path prefix ---
        real_path_start = None
        user_token = None
        if use_cookie == "True":
            # Several cookie names may be configured (comma-separated);
            # take the first that yields a non-empty value.
            if "," in cookie_token_names:
                for token_name_one in cookie_token_names.split(","):
                    user_token = self.get_cookie(token_name_one)
                    if user_token is not None and len(user_token) > 0:
                        # Strip the URL-encoded "bearer  " prefix.
                        user_token = user_token[len("bearer%20%20"):].strip()
                        break
            else:
                user_token = self.get_cookie(cookie_token_names)

            if user_token is None or len(user_token) < 1:
                logger.error("can not get cookie user token.")
                self.write("Please contact Admin.error code: 307")
                return
            if path.startswith(cookie_path_start) is False:
                logger.error(
                    "Cookie run not start with:{}".format(cookie_path_start))
                self.write("Please contact Admin.error code: 305")
                return
            real_path_start = cookie_path_start
        else:
            if path.startswith(path_start) is False:
                logger.error("path not start with:{}".format(path_start))
                self.write("Please contact Admin.error code: 305")
                return
            real_path_start = path_start

        # --- extract the Tableau site name from the path ---
        # path.index raises ValueError if "/views/" is absent; kept as-is
        # so the framework surfaces an explicit error.
        site_name = path[len(real_path_start):path.index("/views/")]
        if site_name is None or len(site_name) < 1:
            logger.error("can not get site:{}".format(site_name))
            self.write("Please contact Admin.error code: 308")
            return

        # BUG FIX: membership must be tested against the split list, not
        # the raw comma-joined string — `"ab" in "abc,xyz"` is a substring
        # match and would wrongly exclude partial site names.
        site_excluded = site_name in exclude_sites_list

        # --- CompanyId verify ---
        logger.info("Start to verify company Id")
        if company_id_required == "True" and not site_excluded:
            if "CompanyId" in arguments:  # check the CompanyId parameter exists
                company_id = self.get_argument("CompanyId")
                logger.info("CompanyId  verify succeed")
            else:
                logger.error("CompanyId not exist")
                self.write("Please contact Admin.error code: 301")
                return
        else:
            logger.info("CompanyId not required")
        logger.info("CompanyId verify finished")

        # --- user token verify ---
        logger.info("Start to verify user token")
        if user_id_required == "True" and not site_excluded:
            # BUG FIX: the original outer condition also tested
            # `"token" in arguments`, which made the inner 304 error
            # branch unreachable; a missing token then fell through to
            # token_verify(None) and misreported 303 instead of 304.
            if use_cookie != "True":
                if "token" in arguments:
                    user_token = self.get_argument("token")
                else:
                    logger.error("can not get user token")
                    self.write("Please contact Admin.error code: 304")
                    return

            if token_verify(user_token) is True:  # verify the token against the user store
                logger.info("user token verify succeed")
            else:
                logger.error("user token verify failed:{}".format(user_token))
                self.write("用户超时或者在其它方登录(303)")
                return
        else:
            logger.info("user token  not  required")
        logger.info("user token verify finished")

        # --- rebuild the query string and append configured extras ---
        params_str = '&'.join("{0}={1}".format(key, val[0].decode("utf-8"))
                              for (key, val) in arguments.items())
        argu_file = os.path.dirname(os.path.dirname(os.path.abspath(
            __file__))) + os.sep + 'config' + os.sep + 'argu.json'
        # BUG FIX: context manager guarantees the file handle is closed
        # even if JSON decoding raises.
        with open(argu_file, 'r') as f:
            data = json.load(f)
        url_arguments = data['url_argu']
        params_str = params_str + '&' + '&'.join(argument
                                                 for argument in url_arguments)

        logger.info("Start to get ticket")
        ticket = get_ticket(site_name, 1)
        logger.info("Ticket get finished:{}".format(ticket))

        if ticket is None:
            self.write("Can not get right authorization ticket.")
        else:
            # Rewrite the path under /t/<site>/... and attach the ticket.
            redirect_path = "/t/{}".format(path[len(real_path_start):])
            redirect_url = "{}/{}{}?{}".format(tableau_redirect_server, ticket,
                                               redirect_path, params_str)
            logger.info(redirect_url)
            self.redirect(redirect_url)

        logger.info(
            "--------------------------- Finish to get Url request : ---------------------------"
        )
Exemple #27
0
                                            batch_size=16,
                                            shuffle=False,
                                            num_workers=4,
                                            drop_last=False)

#TestImgLoader12 = torch.utils.data.DataLoader(
#         DA.myImageFloder(all_left_12,all_right_12,all_left_disp_12, False),
#         batch_size= 16, shuffle= False, num_workers= 4, drop_last=False)

# Parse the comma-separated GPU device list from the CLI arguments.
devices = [int(item) for item in args.devices.split(',')]
ngpus = len(devices)

# Build the model; only 'dispnetcres' is handled here.
if args.model == 'dispnetcres':
    model = DispNetCSRes(ngpus, False, True)
else:
    # NOTE(review): any other model name only logs and leaves `model`
    # undefined, so the script fails below with a NameError — confirm
    # whether this should abort explicitly instead.
    logger.info('no model')

# Wrap for multi-GPU execution and move parameters to the GPU.
if args.cuda:
    model = nn.DataParallel(model, device_ids=devices)
    model.cuda()

# Optionally restore weights from a checkpoint file.
if args.loadmodel is not None:
    state_dict = torch.load(args.loadmodel)
    model.load_state_dict(state_dict['state_dict'])

logger.info('Number of model parameters: {}'.format(
    sum([p.data.nelement() for p in model.parameters()])))

optimizer = optim.Adam(model.parameters(),
                       lr=0.1,
                       betas=(0.9, 0.999),