Code Example #1
File: getNrank.py  Project: Caochy/MusicRecom
    def __init__(self, config_path, gpu, model_path):
        # use the GPU only when a device id is given
        self.use_gpu = gpu is not None
        if self.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        # config
        self.config = ConfigParser(config_path)

        # formatter
        useable_list = {
            # "AYYC": AYPredictionFormatter
            "DeepFM": DeepFMFormatter,
            "LRMM": MusicPairFormatter,
            "DeepInterest": DeepInterestFormatter,
            "NCF": NCFFormatter
        }
        formatter_name = self.config.get("data", "formatter")
        if formatter_name in useable_list:
            self.formatter = useable_list[formatter_name](self.config)
        else:
            raise NotImplementedError("unsupported formatter: %s" % formatter_name)
        task_loss_type = self.config.get("train", "type_of_loss")
        self.criterion = get_loss(task_loss_type)

        # model
        model_name = self.config.get("model", "name")
        net = get_model(model_name, self.config)
        if torch.cuda.is_available() and self.use_gpu:
            net = net.cuda()
        net.load_state_dict(torch.load(model_path))
        self.net = net
        print_info("Net build done")
Code Example #2
File: train.py  Project: xcjthu/cv_predict_pay
args = parser.parse_args()

configFilePath = args.config
if configFilePath is None:
    print("python *.py\t--config/-c\tconfigfile")
    raise SystemExit(1)  # a config file is required

# use the GPU only when device ids are given
use_gpu = args.gpu is not None
if use_gpu:
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

config = ConfigParser(configFilePath)

print_info("Start to build Net")

model_name = config.get("model", "name")
net = get_model(model_name, config)
print_info(model_name)

device = []
print_info("CUDA:%s" % str(torch.cuda.is_available()))
if torch.cuda.is_available() and use_gpu:
    # CUDA_VISIBLE_DEVICES remaps the selected GPUs to local ids 0..n-1,
    # so the device list is simply the local indices
    device = list(range(len(args.gpu.split(","))))

    print_info('begin .cuda()')
    net = net.cuda()
    print_info('cuda() successfully')
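Both train.py excerpts start from parser.parse_args(); a minimal sketch of the argument parser they assume, with argument names inferred from the usage string rather than taken from the project:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", help="path to the config file")
parser.add_argument("--gpu", help='comma-separated GPU ids, e.g. "0,1"')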
Code Example #3
File: work.py  Project: xcjthu/cv_predict_pay
            if cnt % output_time == 0:
                print('\r', end='', flush=True)
                # the validation columns are literal "????" placeholders until
                # valid_net runs at the end of the epoch
                print(
                    '%.4f   % 3d    |  %.4f         % 2.2f   |   ????           ?????   |  %s  | %d'
                    % (lr, epoch_num + 1, train_loss / train_cnt, train_acc /
                       train_cnt * 100, time_to_str((timer() - start)), cnt),
                    end='',
                    flush=True)

        train_loss /= train_cnt
        train_acc /= train_cnt

        # writer.add_scalar(config.get("output", "model_name") + " train loss", train_loss, epoch_num + 1)
        # writer.add_scalar(config.get("output", "model_name") + " train accuracy", train_acc, epoch_num + 1)

        if not os.path.exists(model_path):
            os.makedirs(model_path)
        torch.save(net.state_dict(),
                   os.path.join(model_path, "model-%d.pkl" % (epoch_num + 1)))

        valid_loss, valid_accu = valid_net(net, valid_dataset, use_gpu, config,
                                           epoch_num + 1, writer)
        print('\r', end='', flush=True)
        print(
            '%.4f   % 3d    |  %.4f          %.2f   |  %.4f         % 2.2f   |  %s  |'
            % (lr, epoch_num + 1, train_loss, train_acc * 100, valid_loss,
               valid_accu * 100, time_to_str((timer() - start))))


print_info("training is finished!")
Code Example #4
File: word2vec.py  Project: xcjthu/cv_predict_pay
    def __init__(self, path):
        print_info("begin to load word embedding")

        self.model = fasttext.load_model(path)

        print_info("load word embedding succeed")
Code Example #5
args = parser.parse_args()

configFilePath = args.config
if configFilePath is None:
    print("python *.py\t--config/-c\tconfigfile")
    raise SystemExit(1)  # a config file is required

# use the GPU only when device ids are given
use_gpu = args.gpu is not None
if use_gpu:
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

config = ConfigParser(configFilePath)

print_info("Start to build Net")

model_name = config.get("model", "name")
net = get_model(model_name, config)

device = []
if torch.cuda.is_available() and use_gpu:
    # CUDA_VISIBLE_DEVICES remaps the selected GPUs to local ids 0..n-1
    device = list(range(len(args.gpu.split(","))))

    net = net.cuda()

    try:
        net.init_multi_gpu(device)
    except Exception as e:
        # assumed fallback (the original excerpt is truncated here):
        # log the error and continue on a single GPU
        print_info(str(e))
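init_multi_gpu is a method each model in the project is expected to provide; it is not shown in these excerpts. A minimal sketch of what such a method might look like, assuming it simply wraps the model's sub-modules in torch.nn.DataParallel over the local device ids collected above (ExampleNet and its layers are hypothetical):

import torch.nn as nn

class ExampleNet(nn.Module):
    # hypothetical model, not taken from the project
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(128, 64)
        self.head = nn.Linear(64, 2)

    def forward(self, x):
        return self.head(self.encoder(x))

    def init_multi_gpu(self, device):
        # replicate each sub-module across the visible GPUs
        self.encoder = nn.DataParallel(self.encoder, device_ids=device)
        self.head = nn.DataParallel(self.head, device_ids=device)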