Example #1
    def __init__(self, dataset, splitratio=0.9, startline=0):

        self.dataset = DS.Dataset(dataset)
        self.trainData, self.testData = self.dataset.splitdataset(
            splitratio, startline)
        if Global_V.PRINTPAR == 3:
            print(self.trainData, '\n', self.testData)
        self.trainDataSet = DS.Dataset(self.trainData)
        self.testDataSet = DS.Dataset(self.testData)
Example #2
    def __init__(self, dataset, splitratio=0.9, startline=0):
        # wrap the raw data and split it into training and test partitions
        self.dataset = DS.Dataset(dataset)
        self.trainData, self.testData = self.dataset.splitdataset(splitratio, startline)
        if Global_V.PRINTPAR == 3:
            print(self.trainData, '\n', self.testData)
        self.trainDataSet = DS.Dataset(self.trainData)
        self.testDataSet = DS.Dataset(self.testData)
        # How can self.trainDataSet and self.testDataSet also be made Dataset objects???
        self.parents = []
        # square zero matrix with one row/column per attribute of the training set
        self.cmi_temp = [[0] * self.trainDataSet.getNoAttr() for _ in range(self.trainDataSet.getNoAttr())]
        # conditional mutual information computed from the training set
        self.cmi = self.trainDataSet.getCondMutInf(self.cmi_temp)
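The nested list comprehension above is the safe way to build a square zero matrix, because each row is an independent list. A minimal standalone sketch of the pattern, with an illustrative size in place of the snippet's getNoAttr() call:

# Build an n x n zero matrix with independent rows, as in cmi_temp above.
n = 4  # illustrative; the snippet uses self.trainDataSet.getNoAttr()

safe = [[0] * n for _ in range(n)]   # every row is a distinct list
aliased = [[0] * n] * n              # common pitfall: all rows alias one list

safe[0][0] = 1
aliased[0][0] = 1
print(safe[1][0])     # 0 -> rows are independent
print(aliased[1][0])  # 1 -> the write shows up in every "row"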
Example #3
if args.onGPU and torch.cuda.device_count() > 1:
    # model = torch.nn.DataParallel(model)
    model = DataParallelModel(model)
if args.onGPU:
    model = model.cuda()

# compose the transforms applied to the validation data
valDataset = myTransforms.Compose([
    myTransforms.Normalize(mean=data['mean'], std=data['std']),
    myTransforms.Scale(args.inWidth, args.inHeight),
    myTransforms.ToTensor()
])
# since we are training from scratch, we create data loaders at different scales
# so that we can generate more augmented data and prevent the network from overfitting
valLoader = torch.utils.data.DataLoader(myDataLoader.Dataset(
    data['valIm'], data['valAnnot'], transform=valDataset),
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        num_workers=args.num_workers,
                                        pin_memory=args.onGPU)

if os.path.isfile(args.resume):
    print("=> loading checkpoint '{}'".format(args.resume))
    model.load_state_dict(torch.load(args.resume)["state_dict"])
else:
    raise ValueError("Resuming checkpoint does not exists!")

mean = torch.from_numpy(data['mean']).view(1, -1, 1, 1)
std = torch.from_numpy(data['std']).view(1, -1, 1, 1)

for it, (inp, target) in enumerate(valLoader):
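    # NOTE: the original example breaks off at this loop header. The body below
    # is only a hedged sketch of a typical evaluation step, not code from the
    # source script; it assumes model(inp) returns one tensor of per-pixel scores.
    if args.onGPU:
        inp = inp.cuda()
        target = target.cuda()
    with torch.no_grad():              # no gradients needed for validation
        output = model(inp)            # forward pass
    pred = output.argmax(dim=1)        # predicted class per pixel
    acc = (pred == target).float().mean().item()
    print("batch %d: pixel accuracy %.4f" % (it, acc))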
Example #4
    activation_function = tf.nn.relu
    if args.activation == 'sigmoid':
        activation_function = tf.sigmoid
    elif args.activation == 'tanh':
        activation_function = tf.tanh
    elif args.activation == 'identity':
        activation_function = tf.identity

    file_name = "output/%s/loss=%s layers=%s drop_out=%s lr=%f sr=%f" \
                % (args.dataset, args.loss_type, args.layer_unit, args.drop_out, args.learning_rate, args.social_rate)

    file_hash = str(file_name) + '.output'
    file_output = open(file_hash, 'a')

    data = load_data.Dataset(path=args.dataset + '/', latent_factor=eval(args.layer_unit)[0])
    t1 = time()

    run_label = "OursFinal: dataset=%s, activation_function=%s drop_out=%s, layers=%s, pooling_type=%s, loss_type=%s," \
                "total_epoch=%d, batch_size=%d, learn_rating=%.4f, social_rating=%.4f, user_rate=%.4f, item_rate=%.4f," \
                "attr_rate=%.4f" \
                % (args.dataset, args.activation, args.drop_out,
                   args.layer_unit, args.pooling_type, args.loss_type, args.total_epoch,
                   args.batch_size, args.learning_rate, args.social_rate, args.user_rate, args.item_rate,
                   args.attr_rate)
    # print(run_label)
    file_output.write(run_label + '\n')

    neural_social = NeuralSocial(args.dataset, data.n_user, data.n_attr, data.n_item,
                                 eval(args.layer_unit), eval(args.drop_out), args.pooling_type, args.loss_type,
                                 args.optimizer_type, args.learning_rate, args.social_rate,
Example #5
trainTransform = myTransforms.Compose([
    # myTransforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    myTransforms.Normalize(mean=data['mean'], std=data['std']),
    myTransforms.Scale(args.inWidth, args.inHeight),
    # myTransforms.RandomCropResize(int(7./224.*args.inWidth)),
    # myTransforms.RandomFlip(),
    myTransforms.ToTensor()
])
valTransform = myTransforms.Compose([
    myTransforms.Normalize(mean=data['mean'], std=data['std']),
    myTransforms.Scale(args.inWidth, args.inHeight),
    myTransforms.ToTensor()
])

train_set = myDataLoader.Dataset(data['trainIm'],
                                 data['trainDepth'],
                                 data['trainAnnot'],
                                 transform=trainTransform)
val_set = myDataLoader.Dataset(data['valIm'],
                               data['valDepth'],
                               data['valAnnot'],
                               transform=valTransform)

train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.num_workers,
                                           pin_memory=True,
                                           drop_last=True)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=args.test_batch_size,
                                         shuffle=False,