def __init__(self):
    # Input shape
    self.class_num = config.class_num
    self.img_shape = config.img_shape
    self.img_width = config.img_width
    self.img_height = config.img_height
    self.img_channel = config.img_channel
    self.gf = 32  # base filter count for the generator
    self.df = 64  # base filter count for the discriminator
    # PatchGAN output size: four stride-2 downsamplings of the input height
    self.patch = int(config.img_height // (2 ** 4))
    self.patch_size = (self.patch, self.patch, 1)
    self.s1 = Dataset('./t1ce', './tice_label')
    self.s2 = Dataset('./t2', './t2_label')
    self.target = Dataset('./OpenBayes', './Openbayes_label', need_resize=1)

    optimizer = RMSprop(0.0002)

    self.D = self.build_discriminator()
    self.G = self.build_generator()

    # Combined model for the discriminator update: freeze G, then score both
    # a real image and a translated image produced by G.
    self.G.trainable = False
    real_img = Input(shape=self.img_shape)
    real_src, real_cls = self.D(real_img)
    fake_cls = Input(shape=(self.class_num,))
    fake_img = self.G([real_img, fake_cls])
    fake_src, fake_output = self.D(fake_img)
    self.Train_D = Model([real_img, fake_cls],
                         [real_src, real_cls, fake_src, fake_output])
    self.Train_D.compile(
        loss=['mse', self.classification_loss, 'mse', self.classification_loss],
        optimizer=optimizer,
        loss_weights=[1.0, 1.0, 1.0, 1.0])

    # Combined model for the generator update: freeze D, translate to the
    # target domain, and reconstruct back to the source domain (cycle term).
    self.G.trainable = True
    self.D.trainable = False
    real_x = Input(shape=self.img_shape)
    now_label = Input(shape=(self.class_num,))
    target_label = Input(shape=(self.class_num,))
    fake_x = self.G([real_x, target_label])
    fake_out_src, fake_out_cls = self.D(fake_x)
    x_rec = self.G([fake_x, now_label])
    self.train_G = Model([real_x, now_label, target_label],
                         [fake_out_src, fake_out_cls, x_rec])
    self.train_G.compile(
        loss=['mse', self.classification_loss, 'mae'],
        optimizer=optimizer,
        loss_weights=[1.0, 1.0, 1.0])
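# Hedged sketch: `self.classification_loss` is used in both compile() calls
# above but is not defined in this excerpt. Assuming one-hot domain labels
# and a softmax classification head on D, a minimal Keras-compatible
# definition could be a method like the one below; the body is an
# assumption, only the name matches the usage above.
from tensorflow.keras import backend as K

def classification_loss(self, y_true, y_pred):
    # Clip predictions away from 0 and 1 so the log stays finite, then
    # average the categorical cross-entropy over the batch.
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    return -K.mean(K.sum(y_true * K.log(y_pred), axis=-1))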
'''
@author: slade
@file: main.py
@time: 2020/9/28 16:31
@desc:
'''
import os
import datetime
from DataSet import Dataset
from utils import *
import tensorflow as tf
from dssm import DSSM

config = Config()
dataset = Dataset()
nwords = dataset._vocab_size
trainData, evalData = dataset.dataGen()
# Drop the last partial batch from each epoch's step count.
train_epoch_steps = int(len(trainData) / config.batchSize) - 1
eval_epoch_steps = int(len(evalData) / config.batchSize) - 1

# Define the computation graph
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False,
                                  device_count={"CPU": 78})
    sess = tf.Session(config=session_conf)
    # Define the session
    with sess.as_default():
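        # Hedged sketch of how this truncated block typically continues in a
        # TF1-style script: build the model, initialize variables, and run
        # training steps. The DSSM constructor arguments and the attribute
        # names `model.train_op`, `model.loss`, and `model.feed_dict(...)`
        # are assumptions for illustration, not the project's actual API.
        model = DSSM(config, vocab_size=nwords)  # hypothetical signature
        sess.run(tf.global_variables_initializer())
        for epoch in range(config.epochs):  # `config.epochs` is assumed
            for step in range(train_epoch_steps):
                batch = trainData[step * config.batchSize:(step + 1) * config.batchSize]
                _, train_loss = sess.run(
                    [model.train_op, model.loss],
                    feed_dict=model.feed_dict(batch))  # hypothetical helper
                if step % 100 == 0:
                    print('epoch %d step %d loss %.4f' % (epoch, step, train_loss))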
# Assumed imports for this excerpt; `trainPath`, `USE_CUDA`, and the custom
# `Dataset` class are expected to be defined earlier in the file.
import torch
from torchvision import transforms

testPath = 'nsynth-test/audio/'
LOG_PATH = 'logs/log.pkl'

# Identical preprocessing for train and test: resize to 128x128 and
# normalize each channel to [-1, 1].
train_transforms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize([128, 128]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
test_transforms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize([128, 128]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

data_train = Dataset(trainPath, transform=train_transforms)
data_test = Dataset(testPath, transform=test_transforms)

###################################################
loss = []
Acc = []
Train_loss = []

# Now the actual training code
use_cuda = USE_CUDA and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print('Using device', device)

import multiprocessing
print('num cpus:', multiprocessing.cpu_count())
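# Hedged sketch: the usual next step is to wrap both datasets in DataLoaders.
# `BATCH_SIZE` is an assumed constant (not in the original excerpt); using the
# CPU count printed above for `num_workers` is a common, not mandatory, choice.
from torch.utils.data import DataLoader

BATCH_SIZE = 32  # assumed value
train_loader = DataLoader(data_train, batch_size=BATCH_SIZE, shuffle=True,
                          num_workers=multiprocessing.cpu_count())
test_loader = DataLoader(data_test, batch_size=BATCH_SIZE, shuffle=False,
                         num_workers=multiprocessing.cpu_count())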
        # Tail of the model's forward pass: four stacked layers, the last
        # one producing the output.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        output = self.layer4(x)
        return output


if __name__ == '__main__':
    model_out_file = 'Pretrain/%s_MLP_%s_%d.h5' % (configs.dataset, configs.layers, time())

    # --------------Loading data-------------
    t1 = time()
    dataset = Dataset('Data/%s' % configs.dataset)
    train, testRatings, testNegatives = (dataset.trainMatrix,
                                         dataset.testRatings,
                                         dataset.testNegatives)
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d" %
          (time() - t1, num_users, num_items, train.nnz, len(testRatings)))

    # -------------Build model--------------
    model = MLP(num_users, num_items, configs.layers, configs.reg_layers)

    # -------------Compile model-------------
    if configs.learner.lower() == "adagrad":
        model.compile(optimizer=optimizers.Adagrad(lr=configs.learning_rate),
                      loss='binary_crossentropy')
    elif configs.learner.lower() == "rmsprop":
        model.compile(optimizer=optimizers.RMSprop(lr=configs.learning_rate),
                      loss='binary_crossentropy')
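    # Hedged sketch of how scripts like this usually finish: a fallback
    # learner branch plus a simple fit-and-save loop. The helper
    # `get_train_instances` and the config fields `num_negatives`,
    # `batch_size`, and `epochs` are assumptions, not shown in the excerpt.
    else:
        model.compile(optimizer=optimizers.Adam(lr=configs.learning_rate),
                      loss='binary_crossentropy')

    # -------------Train model---------------
    import numpy as np
    for epoch in range(configs.epochs):
        # Sample fresh negative examples each epoch (hypothetical helper).
        user_input, item_input, labels = get_train_instances(train, configs.num_negatives)
        model.fit([np.array(user_input), np.array(item_input)], np.array(labels),
                  batch_size=configs.batch_size, epochs=1, verbose=0, shuffle=True)
        # Persist weights to the path defined at the top of __main__.
        model.save_weights(model_out_file, overwrite=True)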