Code Example #1
File: line.py  Project: yuk12/dgl
    def fast_train_mp(self):
        """ multi-cpu-core or mix cpu & multi-gpu """
        self.init_device_emb()
        self.emb_model.share_memory()

        sum_up_params(self.emb_model)

        start_all = time.time()
        ps = []

        for i, gpu_id in enumerate(self.args.gpus):
            p = mp.Process(target=self.fast_train_sp,
                           args=(i, gpu_id))
            ps.append(p)
            p.start()

        for p in ps:
            p.join()

        print("Used time: %.2fs" % (time.time() - start_all))
        if self.args.save_in_pt:
            self.emb_model.save_embedding_pt(self.dataset,
                                             self.args.output_emb_file)
        else:
            self.emb_model.save_embedding(self.dataset,
                                          self.args.output_emb_file)
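
The pattern above is PyTorch's Hogwild-style data parallelism: emb_model.share_memory() moves the model's parameters into shared memory, and every spawned process then trains against the same storage in place, without locks. Below is a minimal sketch of that pattern, using a bare shared tensor and a hypothetical worker() in place of fast_train_sp:

import torch
import torch.multiprocessing as mp

# Hypothetical stand-in for the shared embedding model: a single tensor
# that every worker process updates in place.
emb = torch.zeros(8, 4)

def worker(rank, emb):
    # Stand-in for fast_train_sp: write into the shared storage.
    emb[rank] += rank + 1

if __name__ == "__main__":
    emb.share_memory_()  # what share_memory() does for each module parameter
    ps = []
    for rank in range(4):
        p = mp.Process(target=worker, args=(rank, emb))
        ps.append(p)
        p.start()
    for p in ps:
        p.join()
    print(emb[:4])  # rows 0-3 now hold the workers' updates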
Code Example #2
File: deepwalk.py  Project: trytodoit227/dgl
    def fast_train(self):
        """ fast train with dataloader with only gpu / only cpu"""
        # The number of positive node pairs contributed by one node sequence:
        # each of the walk_length nodes pairs with up to window_size
        # neighbors on each side (2 * walk_length * window_size), minus the
        # pairs lost at the two ends of the walk
        # (2 * (1 + 2 + ... + window_size) = window_size * (window_size + 1)).
        num_pos = int(2 * self.args.walk_length * self.args.window_size
                      - self.args.window_size * (self.args.window_size + 1))

        self.init_device_emb()

        if self.args.async_update:
            self.emb_model.share_memory()
            self.emb_model.create_async_update()

        if self.args.count_params:
            sum_up_params(self.emb_model)

        sampler = self.dataset.create_sampler(0)

        dataloader = DataLoader(
            dataset=sampler.seeds,
            batch_size=self.args.batch_size,
            collate_fn=sampler.sample,
            shuffle=False,
            drop_last=False,
            num_workers=self.args.num_sampler_threads,
        )

        num_batches = len(dataloader)
        print("num batchs: %d\n" % num_batches)

        start_all = time.time()
        start = time.time()
        with torch.no_grad():
            for i, walks in enumerate(dataloader):
                if self.args.fast_neg:
                    self.emb_model.fast_learn(walks)
                else:
                    # do negative sampling
                    bs = len(walks)
                    neg_nodes = torch.LongTensor(
                        np.random.choice(self.dataset.neg_table,
                                         bs * num_pos * self.args.negative,
                                         replace=True))
                    self.emb_model.fast_learn(walks, neg_nodes=neg_nodes)

                if i > 0 and i % self.args.print_interval == 0:
                    if self.args.print_loss:
                        print("Batch %d training time: %.2fs loss: %.4f" \
                            % (i, time.time()-start, -sum(self.emb_model.loss)/self.args.print_interval))
                        self.emb_model.loss = []
                    else:
                        print("Batch %d, training time: %.2fs" %
                              (i, time.time() - start))
                    start = time.time()

            if self.args.async_update:
                self.emb_model.finish_async_update()

        print("Training used time: %.2fs" % (time.time() - start_all))
        if self.args.save_in_txt:
            self.emb_model.save_embedding_txt(self.dataset,
                                              self.args.output_emb_file)
        elif self.args.save_in_pt:
            self.emb_model.save_embedding_pt(self.dataset,
                                             self.args.output_emb_file)
        else:
            self.emb_model.save_embedding(self.dataset,
                                          self.args.output_emb_file)
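
The DataLoader above iterates over seed node IDs and uses the sampler's sample method as collate_fn, so each batch of seeds is expanded into a batch of random walks on the fly inside the loader's workers. A minimal sketch of that contract, with a hypothetical ToySampler standing in for dataset.create_sampler():

import torch
from torch.utils.data import DataLoader

class ToySampler:
    """Mirrors the assumed contract of create_sampler(): .seeds is an
    indexable dataset of start nodes, .sample() serves as collate_fn."""
    def __init__(self, num_nodes, walk_length):
        self.seeds = torch.arange(num_nodes)
        self.walk_length = walk_length

    def sample(self, seeds):
        # A real sampler would run random walks on a graph; repeating the
        # seed keeps the (batch_size, walk_length) shape contract.
        return torch.stack(seeds).unsqueeze(1).repeat(1, self.walk_length)

sampler = ToySampler(num_nodes=8, walk_length=5)
dataloader = DataLoader(dataset=sampler.seeds, batch_size=4,
                        collate_fn=sampler.sample, shuffle=False)
for walks in dataloader:
    print(walks.shape)  # torch.Size([4, 5])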
Code Example #3
File: line.py  Project: yuk12/dgl
    def fast_train(self):
        """ fast train with dataloader with only gpu / only cpu"""
        self.init_device_emb()

        if self.args.async_update:
            self.emb_model.share_memory()
            self.emb_model.create_async_update()

        sum_up_params(self.emb_model)

        sampler = self.dataset.create_sampler(0)

        dataloader = DataLoader(
            dataset=sampler.seeds,
            batch_size=self.args.batch_size,
            collate_fn=sampler.sample,
            shuffle=False,
            drop_last=False,
            num_workers=self.args.num_sampler_threads,
        )

        num_batches = len(dataloader)
        print("num batchs: %d\n" % num_batches)

        start_all = time.time()
        start = time.time()
        with torch.no_grad():
            for i, edges in enumerate(dataloader):
                if self.args.fast_neg:
                    self.emb_model.fast_learn(edges)
                else:
                    # do negative sampling
                    bs = edges.size()[0]
                    neg_nodes = torch.LongTensor(
                        np.random.choice(self.dataset.neg_table,
                                         bs * self.args.negative,
                                         replace=True))
                    self.emb_model.fast_learn(edges, neg_nodes=neg_nodes)

                if i > 0 and i % self.args.print_interval == 0:
                    if self.args.print_loss:
                        if self.args.only_fst:
                            print("Batch %d time: %.2fs fst-loss: %.4f" \
                                % (i, time.time()-start, -sum(self.emb_model.loss_fst)/self.args.print_interval))
                        elif self.args.only_snd:
                            print("Batch %d time: %.2fs snd-loss: %.4f" \
                                % (i, time.time()-start, -sum(self.emb_model.loss_snd)/self.args.print_interval))
                        else:
                            print("Batch %d time: %.2fs fst-loss: %.4f snd-loss: %.4f" \
                                % (i, time.time()-start, \
                                -sum(self.emb_model.loss_fst)/self.args.print_interval, \
                                -sum(self.emb_model.loss_snd)/self.args.print_interval))
                        self.emb_model.loss_fst = []
                        self.emb_model.loss_snd = []
                    else:
                        print("Batch %d, training time: %.2fs" %
                              (i, time.time() - start))
                    start = time.time()

            if self.args.async_update:
                self.emb_model.finish_async_update()

        print("Training used time: %.2fs" % (time.time() - start_all))
        if self.args.save_in_pt:
            self.emb_model.save_embedding_pt(self.dataset,
                                             self.args.output_emb_file)
        else:
            self.emb_model.save_embedding(self.dataset,
                                          self.args.output_emb_file)
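
When fast_neg is off, the branch above draws negatives uniformly from a pre-built dataset.neg_table, so the effective distribution is whatever the table encodes; for word2vec/LINE-style models this is typically proportional to degree ** 0.75, though that detail is not visible in this excerpt. A minimal sketch of building and sampling such a table, with hypothetical degrees and sizes:

import numpy as np
import torch

# Node ids are repeated in proportion to their weight, so uniform draws
# from the table follow the weighted distribution.
degrees = np.array([1, 4, 2, 8])
weights = degrees ** 0.75
counts = np.round(weights / weights.sum() * 100).astype(int)
neg_table = np.repeat(np.arange(len(degrees)), counts)

bs, negative = 3, 5  # batch size and negatives per positive pair
neg_nodes = torch.LongTensor(
    np.random.choice(neg_table, bs * negative, replace=True))
print(neg_nodes.view(bs, negative))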
Code Example #4
File: deepwalk.py  Project: zhoujf620/dgl
    def fast_train(self):
        """ fast train with dataloader """
        # The number of positive node pairs contributed by one node sequence:
        # each of the walk_length nodes pairs with up to window_size
        # neighbors on each side (2 * walk_length * window_size), minus the
        # pairs lost at the two ends of the walk
        # (2 * (1 + 2 + ... + window_size) = window_size * (window_size + 1)).
        num_pos = int(2 * self.args.walk_length * self.args.window_size
                      - self.args.window_size * (self.args.window_size + 1))

        self.init_device_emb()

        if self.args.count_params:
            sum_up_params(self.emb_model)

        sampler = self.dataset.create_sampler(0)

        dataloader = DataLoader(
            dataset=sampler.seeds,
            batch_size=self.args.batch_size,
            collate_fn=sampler.sample,
            shuffle=False,
            drop_last=False,
            num_workers=4,
        )

        num_batches = len(dataloader)
        print("num batchs: %d" % num_batches)

        start_all = time.time()
        start = time.time()
        with torch.no_grad():
            max_i = self.args.iterations * num_batches
            for iteration in range(self.args.iterations):
                print("\nIteration: " + str(iteration + 1))

                for i, walks in enumerate(dataloader):
                    # Decay the learning rate linearly for SGD, down to a
                    # floor of 1e-5. Use a global step so the decay continues
                    # across iterations instead of restarting in each one.
                    step = iteration * num_batches + i
                    lr = max(self.args.lr * (max_i - step) / max_i, 0.00001)

                    if self.args.fast_neg:
                        self.emb_model.fast_learn(walks, lr)
                    else:
                        # do negative sampling
                        bs = len(walks)
                        neg_nodes = torch.LongTensor(
                            np.random.choice(self.dataset.neg_table,
                                             bs * num_pos * self.args.negative,
                                             replace=True))
                        self.emb_model.fast_learn(walks,
                                                  lr,
                                                  neg_nodes=neg_nodes)

                    if i > 0 and i % self.args.print_interval == 0:
                        if self.args.print_loss:
                            print("Batch %d training time: %.2fs loss: %.4f" \
                                % (i, time.time()-start, -sum(self.emb_model.loss)/self.args.print_interval))
                            self.emb_model.loss = []
                        else:
                            print("Batch %d, training time: %.2fs" %
                                  (i, time.time() - start))
                        start = time.time()

        print("Training used time: %.2fs" % (time.time() - start_all))
        if self.args.save_in_txt:
            self.emb_model.save_embedding_txt(self.dataset,
                                              self.args.output_emb_file)
        elif self.args.save_in_pt:
            self.emb_model.save_embedding_pt(self.dataset,
                                             self.args.output_emb_file)
        else:
            self.emb_model.save_embedding(self.dataset,
                                          self.args.output_emb_file)
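
The schedule above decays the learning rate linearly from args.lr toward a floor of 1e-5 over all iterations * num_batches training batches. A minimal sketch of the same schedule, with assumed values for the initial rate and the total step count:

# Assumed values: initial rate 0.025, 1000 total batches.
lr0, max_i = 0.025, 1000
for step in range(0, max_i + 1, 250):
    lr = max(lr0 * (max_i - step) / max_i, 1e-5)
    print("step %4d -> lr %.6f" % (step, lr))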