def Dijkstra(num_nodes, mission, f_next, heuristic=None, num_controls=0):
    """Djikstra planner."""
    t = Timer()
    t.tic()
    
    unvis_node = -1
    previous = np.full(num_nodes, dtype=int, fill_value=unvis_node)  # np.int is removed in modern NumPy; use the builtin int
    cost_to_come = np.zeros(num_nodes)
    control_to_come = np.zeros((num_nodes, num_controls), dtype=int)

    startNode = mission['start']['id']
    goalNode = mission['goal']['id']

    # Priority queue ordered by the lowest cost-to-come
    q = PriorityQueue()
    q.insert(0, startNode)
    foundPlan = False

    while not q.IsEmpty():
        _, x = q.pop()
        if x == goalNode:
            foundPlan = True
            break
        neighbours, u, d = f_next(x)
        for xi, ui, di in zip(neighbours, u, d):
            if previous[xi] == unvis_node or cost_to_come[xi] > cost_to_come[x] + di:
                previous[xi] = x
                cost_to_come[xi] = cost_to_come[x] + di
                q.insert(cost_to_come[xi], xi)
                if num_controls > 0:
                    control_to_come[xi] = ui

    # Recreate the plan by traversing previous from goal node
    if not foundPlan:
        return []
    else:
        plan = [goalNode]
        length = cost_to_come[goalNode]
        control = []
        while plan[0] != startNode:
            if num_controls > 0:
                control.insert(0, control_to_come[plan[0]])
            plan.insert(0, previous[plan[0]])

        return {'plan': plan,
                'length': length,
                'num_visited_nodes': np.sum(previous != unvis_node),
                'name': 'Dijkstra',
                'time': t.toc(),
                'control': control,
                'visited_nodes': previous[previous != unvis_node]}
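
The `Timer` and `PriorityQueue` helpers are only implied by the calls above; the following is a minimal sketch consistent with that usage (the real implementations may differ):

import heapq
import time


class Timer:
    """Minimal stopwatch matching the tic()/toc() calls above."""
    def tic(self):
        self._t0 = time.perf_counter()

    def toc(self):
        return time.perf_counter() - self._t0


class PriorityQueue:
    """Min-heap keyed on cost; pop() returns a (cost, node) tuple."""
    def __init__(self):
        self._heap = []

    def insert(self, cost, node):
        heapq.heappush(self._heap, (cost, node))

    def pop(self):
        return heapq.heappop(self._heap)

    def IsEmpty(self):
        return len(self._heap) == 0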
def DepthFirst(num_nodes, mission, f_next, heuristic=None, num_controls=0):
    """Depth first planner."""
    t = Timer()
    t.tic()
    
    unvis_node = -1
    previous = np.full(num_nodes, dtype=int, fill_value=unvis_node)
    cost_to_come = np.zeros(num_nodes)
    control_to_come = np.zeros((num_nodes, num_controls), dtype=int)

    startNode = mission['start']['id']
    goalNode = mission['goal']['id']

    q = LIFO()
    q.insert(startNode)
    foundPlan = False

    while not q.IsEmpty():
        x = q.pop()
        if x == goalNode:
            foundPlan = True
            break
        neighbours, u, d = f_next(x)
        for xi, ui, di in zip(neighbours, u, d):
            if previous[xi] == unvis_node:
                previous[xi] = x
                q.insert(xi)
                cost_to_come[xi] = cost_to_come[x] + di
                if num_controls > 0:
                    control_to_come[xi] = ui

    # Recreate the plan by traversing previous from goal node
    if not foundPlan:
        return []
    else:
        plan = [goalNode]
        length = cost_to_come[goalNode]
        control = []
        while plan[0] != startNode:
            if num_controls > 0:
                control.insert(0, control_to_come[plan[0]])
            plan.insert(0, previous[plan[0]])

        return {'plan': plan,
                'length': length,
                'num_visited_nodes': np.sum(previous != unvis_node),
                'name': 'DepthFirst',
                'time': t.toc(),
                'control': control,
                'visited_nodes': previous[previous != unvis_node]}
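
A hedged usage sketch for both planners on a four-node toy graph. Here `f_next` returns (neighbours, controls, edge costs), and the `LIFO` stack is assumed to mirror the `PriorityQueue` interface:

class LIFO:
    """Minimal stack with the insert()/pop()/IsEmpty() interface used above."""
    def __init__(self):
        self._items = []

    def insert(self, node):
        self._items.append(node)

    def pop(self):
        return self._items.pop()

    def IsEmpty(self):
        return len(self._items) == 0


# Toy graph: 0 -> 1 -> 3 costs 1 + 5, while 0 -> 2 -> 3 costs 4 + 1.
edges = {
    0: ([1, 2], [0, 1], [1.0, 4.0]),
    1: ([3], [0], [5.0]),
    2: ([3], [1], [1.0]),
    3: ([], [], []),
}
mission = {'start': {'id': 0}, 'goal': {'id': 3}}

res = Dijkstra(4, mission, lambda x: edges[x], num_controls=1)
print(res['plan'], res['length'])  # shortest path [0, 2, 3], length 5.0

res = DepthFirst(4, mission, lambda x: edges[x], num_controls=1)
print(res['plan'], res['length'])  # depth-first finds a path, not necessarily the shortest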
Example #3
def post(self, *args, **kwargs):
    method = self.get_argument("method", "POST")
    if method == "DELETE":
        self.delete(*args, **kwargs)
    elif method == "OPTIONS":
        self.options(*args, **kwargs)
    else:
        if (config.get_config("requires_tos")
                and self.get_cookie("accepted_tos") != "true"
                and self.get_argument("accepted_tos", "false") != "true"):
            self.set_status(403)
            self.finish()
            return
        timer = Timer("Kernel handler for %s" % self.get_argument("notebook", uuid.uuid4()))
        proto = self.request.protocol.replace("http", "ws", 1)
        host = self.request.host
        ws_url = "%s://%s/" % (proto, host)
        km = self.application.km
        logger.info("Starting session: %s" % timer)
        timeout = self.get_argument("timeout", None)
        if timeout is not None:
            timeout = float(timeout)
            if math.isnan(timeout) or timeout < 0:
                timeout = None
        kernel_id = yield gen.Task(km.new_session_async,
                                   referer=self.request.headers.get('Referer', ''),
                                   remote_ip=self.request.remote_ip,
                                   timeout=timeout)
        data = {"ws_url": ws_url, "id": kernel_id}
        self.write(self.permissions(data))
        self.set_cookie("accepted_tos", "true", expires_days=365)
        self.finish()
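
A hedged client-side sketch of calling this handler. The route, host, and the exact response shape produced by permissions() are assumptions:

import requests  # assumption: any HTTP client works

resp = requests.post(
    "https://example.org/kernel",                  # route is hypothetical
    data={"accepted_tos": "true", "timeout": "30"},
)
info = resp.json()                                 # assumes permissions() returns JSON
print(info["ws_url"], info["id"])                  # websocket base URL and kernel id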
Example #4
def loadOSMmap(xmlfile, figurefile):
    """Load OpenStreetMap XML file and parse into map object

    First time map object is saved into a file to speed-up next time.
    """
    _, osmFileName = os.path.split(xmlfile)

    if os.access(osmFileName + '.pickle', os.R_OK):
        osmMap = OpenStreetMap()
        osmMap.loadpickle(osmFileName + '.pickle')
    else:
        t = Timer()
        t.tic()
        print('Pre-parsed file does not exist, parsing XML file')
        print('This will take a while ...')
        osmMap = OpenStreetMap(xmlfile, figurefile)
        print('Done parsing the XML file in %.2f seconds\n' % t.toc())
        print('Saving data in file ' + osmFileName + '.pickle')
        osmMap.pickle(osmFileName + '.pickle')
    return osmMap
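
A hedged usage sketch (the file names are placeholders). Note that the pickle cache is keyed on the XML basename, so the .pickle file must be removed to force a re-parse:

osm_map = loadOSMmap('maps/city.osm', 'maps/city.png')  # hypothetical paths
# os.remove('city.osm.pickle')  # uncomment to invalidate the cached parse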
Example #5
def __init__(self, ip, tmp_dir):
    self.context = zmq.Context()
    self.dealer = self.context.socket(zmq.DEALER)
    # Bind to a random port and report it so the parent process can connect
    self.port = self.dealer.bind_to_random_port("tcp://%s" % ip)
    print(self.port)
    sys.stdout.flush()
    self.sage_mode = self.setup_sage()
    print(self.sage_mode)
    sys.stdout.flush()
    self.km = UntrustedMultiKernelManager(
        ip, update_function=self.update_dict_with_sage, tmp_dir=tmp_dir)
    self.timer = Timer("", reset=True)
Example #6
def post(self, *args, **kwargs):
    method = self.get_argument("method", "POST")
    if method == "DELETE":
        self.delete(*args, **kwargs)
    elif method == "OPTIONS":
        self.options(*args, **kwargs)
    else:
        timer = Timer("Kernel handler for %s" %
                      self.get_argument("notebook", uuid.uuid4()))
        proto = self.request.protocol.replace("http", "ws", 1)
        host = self.request.host
        ws_url = "%s://%s/" % (proto, host)
        km = self.application.km
        logger.info("Starting session: %s" % timer)
        kernel_id = yield gen.Task(km.new_session_async)
        data = {"ws_url": ws_url, "kernel_id": kernel_id}
        self.write(self.permissions(data))
        self.finish()
Example #7
def main():
    make_deterministic()

    # region Prepare data
    with Timer('\nData preparation time: %s\n'):
        ru_lang = Language()
        en_lang = Language()

        yandex = Yandex(
            'datasets/yandex/corpus.en_ru.1m.ru',
            'datasets/yandex/corpus.en_ru.1m.en',
            ru_lang,
            en_lang,
            data_slice=H.dataset_slice,
        )

        paracrawl = ParaCrawl(
            'datasets/paracrawl/en-ru.txt',
            ru_lang,
            en_lang,
            data_slice=slice(0),
        )

        low = ru_lang.lower_than(H.ru_word_count_minimum)
        infrequent_words_n = max(
            ceil(ru_lang.words_n * H.infrequent_words_percent), len(low))
        if infrequent_words_n > 0:
            ru_lang.drop_words(ru_lang.lowk(infrequent_words_n))
            print(
                f'{infrequent_words_n:,} infrequent Russian words are dropped')

        low = en_lang.lower_than(H.en_word_count_minimum)
        if len(low) > 0:
            en_lang.drop_words(*low)
            print(f'{len(low):,} infrequent English words are dropped')

        print(
            f'Russian language: {ru_lang.words_n:,} words, {ru_lang.sentence_length:,} words in a sentence'
        )
        print(
            f'English language: {en_lang.words_n:,} words, {en_lang.sentence_length:,} words in a sentence'
        )

        batch = H.batch_size
        dataset = ConcatDataset((yandex, paracrawl))
        loader = DataLoader(dataset, batch, shuffle=True)
    # endregion

    # region Models and optimizers
    model = Seq2Seq(
        Encoder(ru_lang.words_n, H.encoder_embed_dim, H.encoder_hidden_dim,
                H.encoder_bi, H.decoder_hd),
        Attention(H.encoder_hd, H.decoder_hd),
        Decoder(en_lang.words_n, H.decoder_embed_dim, H.decoder_hidden_dim,
                H.encoder_hd),
    ).to(Device).train()

    optimizer = Adam(model.parameters(), lr=H.learning_rate)
    criterion = CrossEntropyLoss(ignore_index=Token_PAD, reduction='sum')
    # endregion

    # region Training
    teaching_percent = H.teaching_percent
    total = len(dataset)
    log_interval = max(5, round(total / batch / 1000))

    for epoch in range(1, H.epochs + 1):
        with Printer() as printer:
            printer.print(f'Train epoch {epoch}: starting...')
            for i, ((ru, ru_l), en_sos, en_eos) in enumerate(loader, 1):
                # Zero the parameter gradients
                optimizer.zero_grad()
                # Run data through model
                predictions = model(ru, ru_l, en_sos, teaching_percent)
                # Calculate loss
                loss = criterion(predictions, en_eos)
                # Backpropagate and take an optimization step
                loss.backward()
                clip_grad_norm_(model.parameters(), H.gradient_norm_clip)
                optimizer.step()

                # Print log
                if i % log_interval == 0:
                    printer.print(
                        f'Train epoch {epoch}: {i * batch / total:.1%} [{i * batch:,}/{total:,}]'
                    )

            printer.print(f'Train epoch {epoch}: completed')
    # endregion

    torch.save(
        (
            ru_lang.__getnewargs__(),
            en_lang.__getnewargs__(),
            model.cpu().eval().data,
        ),
        'data/data.pt',
    )

    evaluate(model.to(Device), ru_lang, en_lang,
             'datasets/yandex/corpus.en_ru.1m.ru',
             slice(H.dataset_slice.stop + 1, H.dataset_slice.stop + 1 + 100))
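
The snippet defines main() but shows no entry point; the usual guard (an assumption, not shown in the source) would be:

if __name__ == '__main__':
    main()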
Example #8
import os

import torch

# Work around the local package layout: temporarily chdir so `misc` is importable
os.chdir("utils")
from misc import Timer
os.chdir("..")

num_training_loss = 0  # module-level global, updated inside train()
timer = Timer()


def train(loader,
          net,
          criterion,
          optimizer,
          device,
          checkpoint_folder,
          debug_steps=100,
          epoch=-1):
    global num_training_loss

    net.train(True)
    running_loss = 0.0
    running_regression_loss = 0.0
    running_classification_loss = 0.0

    for i, data in enumerate(loader):
        # timer.start()
        images, boxes, labels = data
        images = images.to(device)
        boxes = boxes.to(device)
        labels = labels.to(device)
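        # The example is cut off here. A hedged sketch of a typical
        # SSD-style continuation matching the names above (the model and
        # criterion signatures are assumptions, so it is left commented out):
        # optimizer.zero_grad()
        # confidence, locations = net(images)
        # regression_loss, classification_loss = criterion(
        #     confidence, locations, labels, boxes)
        # loss = regression_loss + classification_loss
        # loss.backward()
        # optimizer.step()
        # running_loss += loss.item()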
Example #9
    def train(self):
        """The function for the meta-train phase."""
        # Set the meta-train log
        trlog = {}
        trlog['args'] = vars(self.args)
        trlog['train_loss'] = []
        trlog['val_loss'] = []
        trlog['train_acc'] = []
        trlog['val_acc'] = []
        trlog['max_acc'] = 0.0
        trlog['max_acc_epoch'] = 0

        timer = Timer()
        # Generate the labels for train set of the episodes
        label_shot = torch.arange(self.args.way).repeat(self.args.shot).to(
            self.args.device).type(torch.long)
        label_query = torch.arange(self.args.way).repeat(
            self.args.train_query).to(self.args.device).type(torch.long)

        # Start meta-train
        for epoch in range(1, self.args.max_epoch + 1):
            ################### train #############################
            self.model.train()
            train_acc_averager = Averager()
            self.train_loader = DataLoader(dataset=self.trainset,
                                           batch_sampler=self.train_sampler,
                                           num_workers=self.args.num_work,
                                           pin_memory=True)
            train_data = self.inf_get(self.train_loader)
            self.val_loader = DataLoader(dataset=self.valset,
                                         batch_sampler=self.val_sampler,
                                         num_workers=self.args.num_work,
                                         pin_memory=True)
            val_data = self.inf_get(self.val_loader)
            acc_log = []
            tqdm_gen = tqdm.tqdm(
                range(self.args.num_batch // self.args.meta_batch))
            for i in tqdm_gen:
                data_list = []
                label_shot_list = []
                for _ in range(self.args.meta_batch):
                    data_list.append(next(train_data).to(self.args.device))
                    label_shot_list.append(label_shot)
                data_list = torch.stack(data_list, dim=0)
                label_shot_list = torch.stack(label_shot_list,
                                              dim=0)  # shot-label
                out = self.model(data_list, label_shot_list)
                meta_loss = 0
                for inner_id in range(self.args.meta_batch):
                    meta_loss += F.cross_entropy(out[inner_id], label_query)
                    cur_acc = count_acc(out[inner_id], label_query)
                    acc_log.append(
                        (i * self.args.meta_batch + inner_id, cur_acc))
                    train_acc_averager.add(cur_acc)
                    tqdm_gen.set_description('Epoch {}, Acc={:.4f}'.format(
                        epoch, cur_acc))
                meta_loss /= self.args.meta_batch
                plot.plot('meta_loss', meta_loss.item())
                self.optimizer.zero_grad()
                meta_loss.backward()
                self.optimizer.step()
                plot.tick()
            train_acc_averager = train_acc_averager.item()
            trlog['train_acc'].append(train_acc_averager)
            plot.plot('train_acc_averager', train_acc_averager)
            plot.flush(self.args.save_path)

            ####################### eval ##########################
            # self.model.eval()
            val_acc_averager = Averager()
            for i in tqdm.tqdm(
                    range(self.args.val_batch // self.args.meta_batch)):
                data_list = []
                label_shot_list = []
                for _ in range(self.args.meta_batch):
                    data_list.append(next(val_data).to(self.args.device))
                    label_shot_list.append(label_shot)
                data_list = torch.stack(data_list, dim=0)
                label_shot_list = torch.stack(label_shot_list, dim=0)
                out = self.model(data_list, label_shot_list).detach()
                for inner_id in range(self.args.meta_batch):
                    cur_acc = count_acc(out[inner_id], label_query)
                    val_acc_averager.add(cur_acc)
            val_acc_averager = val_acc_averager.item()
            trlog['val_acc'].append(val_acc_averager)
            print('Epoch {}, Val, Acc={:.4f}'.format(epoch, val_acc_averager))

            # Update best saved model
            if val_acc_averager > trlog['max_acc']:
                trlog['max_acc'] = val_acc_averager
                trlog['max_acc_epoch'] = epoch
                self.save_model('max_acc')

            with open(osp.join(self.args.save_path, 'trlog.json'), 'w') as f:
                json.dump(trlog, f)
            if epoch % 10 == 0:
                self.save_model('epoch' + str(epoch))
                print('Running Time: {}, Estimated Time: {}'.format(
                    timer.measure(),
                    timer.measure(epoch / self.args.max_epoch)))
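
`inf_get` is used above but not shown; a minimal sketch of an infinite batch generator consistent with those calls (an assumption, not the original implementation):

    def inf_get(self, loader):
        # Cycle over the DataLoader forever, yielding one batch of inputs at a time.
        while True:
            for batch, _ in loader:
                yield batch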
Example #10
def create_timer(self, _func=None):
    new_timer = Timer(func=_func)
    self.timers.append(new_timer)
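
A hedged sketch of how this fragment might sit in its owner class; the `TimerPool` name and its `timers` list are assumptions inferred from the method body:

class TimerPool:
    """Hypothetical container for the create_timer fragment above."""
    def __init__(self):
        self.timers = []

    def create_timer(self, _func=None):
        new_timer = Timer(func=_func)
        self.timers.append(new_timer)

pool = TimerPool()
pool.create_timer(_func=lambda: print("tick"))
assert len(pool.timers) == 1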