# Example #1
                    choices=[2, 4, 8],
                    help='upscale factor')
# Number of passes over the training set (read back as opt.epochs in __main__).
parser.add_argument('--epochs',
                    default=100,
                    type=int,
                    help='train epoch number')

if __name__ == '__main__':
    opt = parser.parse_args()

    # Hyper-parameters from the CLI namespace.
    crop_size = opt.crop_size
    upscale = opt.upscale_factor
    epoch = opt.epochs

    # DIV2K high-resolution image folders; the training set is cropped to
    # crop_size and both sets are paired with inputs downscaled by `upscale`
    # — presumably inside load_training_data/load_val_data (defined elsewhere).
    train_set = load_training_data('DIV2K_train_HR',
                                   crop_size=crop_size,
                                   upscale=upscale)
    val_set = load_val_data('DIV2K_valid_HR', upscale=upscale)
    train_loader = DataLoader(dataset=train_set,
                              num_workers=4,
                              batch_size=64,
                              shuffle=True)
    # Validation: batch_size=1 and no shuffling, so per-image results are
    # deterministic across runs.
    val_loader = DataLoader(dataset=val_set,
                            num_workers=4,
                            batch_size=1,
                            shuffle=False)

    # Generator/discriminator pair (SRGAN-style — TODO confirm); the
    # generator is built for the requested upscale factor.
    netG = Generator(upscale)
    print('# generator parameters:',
          sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
# Left over from an earlier revision (stdout redirection): #sys.stdout = f
print("Writing to {}\n".format(out_dir))

# Echo every flag as NAME=value on stdout.
print("\nParameters:")
flag_items = sorted(FLAGS.__flags.items())
for name, val in flag_items:
    print("{}={}".format(name.upper(), val))
print("")

# Persist the same NAME=value dump alongside the run's outputs.
with open(out_dir + '/params', 'w') as params_file:
    for name, val in flag_items:
        params_file.write("{}={}".format(name.upper(), val))
        params_file.write("\n")

# hyper-parameters end here
# ASAP essay-scoring data: a single TSV containing all essay sets.
training_path = 'training_set_rel3.tsv'
essay_list, resolved_scores, essay_id = data_utils.load_training_data(training_path, essay_set_id)

# Score bounds come from the data, except essay sets 7 and 8, whose
# official ranges may be wider than what appears in this sample.
max_score = max(resolved_scores)
min_score = min(resolved_scores)
if essay_set_id == 7:
    min_score, max_score = 0, 30
elif essay_set_id == 8:
    min_score, max_score = 0, 60
# BUG FIX: this line was a Python 2 print *statement*, a syntax error under
# Python 3; the rest of the file uses the print() function, so use it here too.
print('max_score is {} \t min_score is {}\n'.format(max_score, min_score))
with open(out_dir+'/params', 'a') as f:
    f.write('max_score is {} \t min_score is {} \n'.format(max_score, min_score))

# include max score
score_range = range(min_score, max_score+1)

#word_idx, _ = data_utils.build_vocab(essay_list, vocab_limit)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

import numpy as np
import data_utils

# Nominal feature width and sequence length for the LSTM below.
# NOTE(review): the model's input_shape is (1, 1), which does not use these
# constants — confirm which shape DataLoader actually yields.
data_dim = 128
timesteps = 128

train_data = data_utils.load_training_data()
test_data = data_utils.load_test_data()

#    loader = data_utils.DataLoader(data=data,batch_size=train_config.batch_size, num_steps=train_config.num_steps)
# 7352 is presumably the full training-set size (batch = whole set),
# with 1 step per sample — TODO confirm against data_utils.DataLoader.
data_loader = data_utils.DataLoader(train_data, 7352, 1)
# x_test, y_test = data_utils.DataLoader(train_data, 128, 1).next_batch()
# x_test, y_test = data_utils.DataLoader(train_data, 128, 1).next_batch()

# expected input data shape: (batch_size, timesteps, data_dim)
# NOTE(review): input_shape=(1, 1) disagrees with the data_dim/timesteps
# constants above (128, 128) — confirm against what DataLoader yields.
model = Sequential()
model.add(
    LSTM(256, return_sequences=True,
         input_shape=(1, 1)))  # returns a sequence of 256-dim vectors
model.add(LSTM(
    256,
    return_sequences=True))  # returns a sequence of 256-dim vectors
model.add(LSTM(
    256, return_sequences=True))  # returns a sequence of 256-dim vectors
model.add(Dense(128, activation="sigmoid", name="DENSE1"))
model.add(Dense(72, activation="sigmoid", name="DENSE2"))

# BUG FIX: softmax over a single unit is constant 1.0 regardless of the
# input, so the network could never produce a useful output or gradient.
# Use sigmoid for the single-unit output, consistent with the Dense
# layers above.
model.add(Dense(1, activation='sigmoid'))