Example #1
def adi(iterations=10000):
    model = None
    if os.path.exists(model_path):
        model = keras.models.load_model(model_path)
    else:
        model = buildModel(20*24)
        compile_model(model, 0.001)

    for it in range(iterations):

        # generate N scrambled cubes
        l = 100
        k = 20
        cube_agent = CubeAgent(number_of_cubes=l, batches=k)

        cube_agent.scramble_cubes_for_data()

        
        cubes = np.array(cube_agent.env).flatten()

        #initialize the training parameters -> marked by X and Y in the paper
        encodedStates = np.empty((k*l, 20*24)) 
        values = np.empty((k*l, 1))
        policies = np.empty(k*l)

        # iterate through the number of cubes and the number of actions
        for i, state in enumerate(cubes):
            valuesForState = np.zeros(len(state.action_space))

            encodedStates[i] = np.array(state.get_one_hot_state().flatten())
            actions = state.action_space

            start_state = state.cube.copy()

            #1-depth BFS 
            for j, action in enumerate(actions):
                _, reward = state.step(j)
                childStateEncoded = np.array(state.get_one_hot_state()).flatten()
                state.set_state(start_state) #set back to the original

                value, policy = model.predict(childStateEncoded[None, :])
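                # value is the scalar value-head output; policy (the move-probability head) is not used in this 1-depth expansion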
                valueNumber = value[0][0]
                valuesForState[j] = valueNumber + reward

            values[i] = np.array([valuesForState.max()])
            policies[i] = valuesForState.argmax()
        
        # log to TensorBoard
        log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

        sample_weight = np.array([[1/(i+1) for i in range(k)] for j in range(l)]).flatten()
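        # weights decay as 1/(scramble depth + 1), presumably giving states closer to the solved cube more influence, as in the paper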

        model.fit(encodedStates,
                  {"output_policy": policies, "output_value": values},
                  epochs=15, sample_weight=sample_weight,
                  callbacks=[tensorboard_callback])
        model.save(model_path)
    return model
Example #2
def main(args):

    # Loads the dataset
    X_TRAIN = pickle.load(
        open(f"{args['data_destination']}/X_TRAIN.pickle", "rb"))
    Y_TRAIN = pickle.load(
        open(f"{args['data_destination']}/Y_TRAIN.pickle", "rb"))
    #X_TEST = pickle.load(open(f"{args['data_destination']}/X_TEST.pickle", "rb"))
    #Y_TEST = pickle.load(open(f"{args['data_destination']}/Y_TEST.pickle", "rb"))
    #X_VALIDATION = pickle.load(open(f"{args['data_destination']}/X_VALIDATION.pickle", "rb"))
    #Y_VALIDATION = pickle.load(open(f"{args['data_destination']}/Y_VALIDATION.pickle", "rb"))

    #normalize
    X_TRAIN = X_TRAIN / 255.0
    #X_TEST = X_TEST/255.0
    #X_VALIDATION = X_VALIDATION/255.0

    dct_model = model.buildModel(
        tuple(int(dim) for dim in args['shape'].split(',')),
        int(args['classes']))
    model.train(dct_model, X_TRAIN, Y_TRAIN, int(args['batch_size']),
                int(args['epochs']))
    # summary() prints the model architecture itself and returns None
    dct_model.summary()
    # model.eval(dct_model, X_TEST, Y_TEST, int(args['batch_size']))
    try:
        model.predict(dct_model, X_TEST)
    except Exception as error:
        # X_TEST is only defined if the commented-out load above is enabled
        print(f"model.predict gave an error: {error}")
    model.saveModel(dct_model)
Example #3
def main():
    data_directory = '/home/linux-box0520/data/food-101/data2/'
    TRAIN_BATCHSIZE = 16
    EVAL_BATCHSIZE = 1

    data_transforms = {
        'train':
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            #transforms.RandomApply([
            #    transforms.RandomHorizontalFlip(),
            #    transforms.RandomVerticalFlip(),
            #    transforms.ColorJitter(),
            #    transforms.RandomRotation((1,360))]),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'val':
        transforms.Compose([
            transforms.RandomResizedCrop(224),
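            # note: validation also gets a random crop here; a deterministic Resize + CenterCrop is the more common choice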
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
    }

    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_directory, x),
                                data_transforms[x])
        for x in ['train', 'val']
    }
    dataloaders = {
        x: torch.utils.data.DataLoader(
            image_datasets[x],
            batch_size=(TRAIN_BATCHSIZE if x == 'train' else EVAL_BATCHSIZE),
            shuffle=True,
            num_workers=1)
        for x in ['train', 'val']
    }
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model = buildModel()

    model = model.to(device)
    loss = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=.005, momentum=.93)

    model = train_model(model,
                        loss,
                        optimizer,
                        dataloaders,
                        dataset_sizes,
                        device,
                        num_epochs=200)

    torch.save(model.state_dict(), 'deconvNet-200epochs-3classes.pt')
Example #4
File: app.py Project: stevetapley/mame-ai
def playGame(observe=False):
    tensorModel = buildModel()
    game = Game()
    agent = Agent(game)
    train = Train(agent, game)

    try:
        train.trainNetwork(tensorModel, observe=observe)
    except StopIteration:
        game.End()
Example #5
def runTournamentBracket(model=None):
    year = conf.year
    if model is None:
        model = Model.buildModel(200)

    # print ("Running Tournament")
    west = Bracket.runbracket(conf.teams['west'], model)
    east = Bracket.runbracket(conf.teams['east'], model)
    south = Bracket.runbracket(conf.teams['south'], model)
    midwest = Bracket.runbracket(conf.teams['midwest'], model)
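    # each region's result at [4][0] is presumably that region's champion, feeding the Final Four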
    final4teams = [west[4][0], east[4][0], south[4][0], midwest[4][0]]
    final4 = Bracket.runbracket(final4teams, model)
    champion = final4[2][0]
    return {
        'year': year,
        'west': west,
        'east': east,
        'south': south,
        'midwest': midwest,
        'final4': final4,
        'champion': champion
    }
Example #6
def loadModel(stateDictPath):
    model = buildModel()
    model.load_state_dict(torch.load(stateDictPath))
    model.eval()
    return model
Example #7
def train_(base_path):
    data, anno = read_data(base_path)
    anno = np.expand_dims(anno, axis=-1)

    mean = np.mean(data)
    std = np.std(data)

    data_ = (data - mean) / std

    train_data = data_[:150]
    train_anno = anno[:150]

    val_data = data_[150:]
    val_anno = anno[150:]

    print('-' * 30)
    print(
        'Creating and compiling the fully convolutional regression networks.')
    print('-' * 30)

    model = buildModel(input_dim=(256, 256, 3))
    model_checkpoint = ModelCheckpoint('cell_counting.hdf5',
                                       monitor='loss',
                                       save_best_only=True)
    model.summary()
    print('...Fitting model...')
    print('-' * 30)
    change_lr = LearningRateScheduler(step_decay)

    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=30,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.3,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.3,  # randomly shift images vertically (fraction of total height)
        zoom_range=0.3,
        shear_range=0.,
        horizontal_flip=True,  # randomly flip images
        vertical_flip=True,  # randomly flip images
        fill_mode='constant',
        dim_ordering='tf')

    # Fit the model on the batches generated by datagen.flow().
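    # Note: samples_per_epoch/nb_epoch (and dim_ordering above) are Keras 1 argument names;
    # Keras 2 renamed them to steps_per_epoch/epochs and data_format.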
    model.fit_generator(
        datagen.flow(train_data, train_anno, batch_size=16),
        samples_per_epoch=train_data.shape[0],
        nb_epoch=96,
        callbacks=[model_checkpoint, change_lr],
    )

    model.load_weights('cell_counting.hdf5')
    A = model.predict(val_data)
    mean_diff = np.average(
        np.abs(np.sum(np.sum(A, 1), 1) -
               np.sum(np.sum(val_anno, 1), 1))) / (100.0)
    print('After training, the difference is: {} cells per image.'.format(
        np.abs(mean_diff)))
Example #8
np.save(outputDir + "e_files_trainBatches", e_data[0])
np.save(outputDir + "e_events_trainBatches", e_data[1])
np.save(outputDir + "e_files_valBatches", e_data[2])
np.save(outputDir + "e_events_valBatches", e_data[3])
np.save(outputDir + "bkg_files_trainBatches", bkg_data[0])
np.save(outputDir + "bkg_events_trainBatches", bkg_data[1])
np.save(outputDir + "bkg_files_valBatches", bkg_data[2])
np.save(outputDir + "bkg_events_valBatches", bkg_data[3])

# initialize generators
train_generator = generator(e_data[0], e_data[1], bkg_data[0], bkg_data[1],
                            batch_size, dataDir, False, True, False)
val_generator = generator(e_data[2], e_data[3], bkg_data[2], bkg_data[3],
                          batch_size, dataDir, False, True, False)

model = buildModel()

model.compile(optimizer=optimizers.Adam(),
              loss='categorical_crossentropy',
              metrics=metrics)

callbacks = [
    callbacks.EarlyStopping(monitor=monitor, patience=patience_count),
    callbacks.ModelCheckpoint(filepath=weightsDir + 'model.{epoch}.h5',
                              save_best_only=True,
                              monitor=monitor,
                              mode='auto'),
    # tf.keras.callbacks.TensorBoard(log_dir=logDir,
    #                                histogram_freq=0,
    #                                write_graph=False,
    #                                write_images=False)
]
Example #9
if movie == '' or movie is None:
    print('Error. Usage: python index.py --movie "<movie>"')
    sys.exit()

try:
    numberOfTweets = 1
    tweets = []
    ratingValue = 0

    if model.isModelExists():
        print("Loading Classifier Model...")
        vectorizer, classifier = model.loadModel()
    else:
        print("Building Classifier Model...")
        # Start building models
        vectorizer, classifier = model.buildModel()

    print("Retrieving Tweets...")
    tso = TwitterSearchOrder()
    tso.set_keywords([movie])
    tso.set_language('en')
    tso.set_include_entities(False)

    ts = TwitterSearch(
        consumer_key='<YOUR_CONSUMER_KEY>',  # fill in your own Twitter API credentials
        consumer_secret='<YOUR_CONSUMER_SECRET>',
        access_token='<YOUR_ACCESS_TOKEN>',
        access_token_secret='<YOUR_ACCESS_TOKEN_SECRET>')
    for tweet in ts.search_tweets_iterable(tso):
        preprocessed = preprocess.preprocess(tweet['text'])
        tweets.append(preprocessed)
Example #10
                  dest="load",
                  help="Load model matchup probabilities from file "
                  "(precalculated and saved using --save)",
                  action="store_true",
                  default=False)
parser.add_option("--save",
                  dest="save",
                  help="Save the calculated matchup probabilities to a file",
                  action="store_true",
                  default=False)

options, args = parser.parse_args()

model = None
if options.load:
    model = Model.buildModel(load=True)
else:
    model = Model.buildModel(nForests=options.nModels,
                             nTrees=options.nTrees,
                             save=options.save)

if options.calcStats:
    TournamentStats.calculatePredictionStats(model, 100000)
elif options.makeBracket:
    if options.winner:
        TournamentStats.makeRandomBracket(model, options.winner)
    else:
        TournamentStats.makeRandomBracket(model)
elif options.predict:
    team1 = options.predict[0]
    team2 = options.predict[1]
Example #11
    "projectId": "sub-it-a29ca",
    "storageBucket": "sub-it-a29ca.appspot.com",
    "messagingSenderId": "744036348319",
    "appId": "1:744036348319:web:8677ea4aeddccdda3fcdaa",
    "measurementId": "G-STLRB5PMY1"
}

# Firebase integration
firebase = pyrebase.initialize_app(firebaseConfig)
auth = firebase.auth()
user = None
userID = None
db = firebase.database()

# Build ML model
modelVar = model.buildModel()

# Open ingredients JSON
with open("./alt_ingr.json", 'r') as fp:
    alt_ingr = json.load(fp)

with open("./ingr_co2.json", 'r') as fp:
    ingr_co2 = json.load(fp)

app = Flask(__name__)

@app.route("/")
def home():
    # Redirect root to dashboard
    return redirect(url_for("dash"))
Example #12
def main():
    trainDataFile = pd.read_csv(DATA_DIR + 'train_ship_segmentations_v2.csv')
    print("Training Data (csv) File Shape:\t", trainDataFile.shape)

    # Labeling NaN values
    trainDataFile['NaN'] = trainDataFile['EncodedPixels'].apply(isNaN)
    trainDataFile = trainDataFile.iloc[100000:]
    trainDataFile = trainDataFile.sort_values('NaN', ascending=False)
    print("\nNaN Value Count")
    print(trainDataFile['NaN'].value_counts())

    # Calculating Areas
    trainDataFile['area'] = trainDataFile['EncodedPixels'].apply(calculateArea)
    IsShip = trainDataFile[trainDataFile['area'] > 0]

    train_group = trainDataFile.groupby('ImageId').sum()
    print("\nGrouping Entries with same ImageId\nNaN Value Count")
    print(train_group['NaN'].value_counts())

    train_group = train_group.reset_index()

    # Assigning Classes
    train_group['class'] = train_group['area'].apply(assignClasses)
    print("\nClasses of Ships")
    print(train_group['class'].value_counts())

    # Train, Validation Split
    trainingSet, validationSet = train_test_split(
        train_group, test_size=0.01, stratify=train_group['class'].tolist())
    print("\nTraining Set Shape:\t", trainingSet.shape[0])
    print("Validation Set Shape:\t", validationSet.shape[0])

    trainingSet_ship = trainingSet['ImageId'][trainingSet['NaN'] == 0].tolist()
    trainingSet_nan = trainingSet['ImageId'][trainingSet['NaN'] == 1].tolist()
    # Randomizing
    trainingSet_ship = random.sample(trainingSet_ship, len(trainingSet_ship))
    trainingSet_nan = random.sample(trainingSet_nan, len(trainingSet_nan))
    EQUALIZED_DATA = min(len(trainingSet_ship), len(trainingSet_nan))

    validationSet_ship = validationSet['ImageId'][validationSet['NaN'] ==
                                                  0].tolist()
    validationSet_nan = validationSet['ImageId'][validationSet['NaN'] ==
                                                 1].tolist()
    print("Training Set (Ships, Not Ships):\t", len(trainingSet_ship),
          len(trainingSet_nan))

    datagen = customGenerator(trainDataFile,
                              trainingSet_ship,
                              trainingSet_nan,
                              batchSize=BATCH_SIZE,
                              equalizedData=EQUALIZED_DATA)
    valgen = customGenerator(trainDataFile,
                             validationSet_ship,
                             validationSet_nan,
                             batchSize=VALIDATION_BATCH_SIZE,
                             equalizedData=EQUALIZED_DATA)

    validation_x, validation_y = next(valgen)
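    # draw one fixed batch from the validation generator to use as validation data during fit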

    model = buildModel()
    model.summary()

    model.compile(optimizer=Adam(1e-3, decay=0.0),
                  metrics=['accuracy', f1],
                  loss='mean_squared_error')

    model.save('ship_imageProcessing.h5')
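    # note: this saves the model before training; the weights trained below are not re-saved in this snippet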

    # model = load_model('ship.h5', custom_objects={"f1" : f1})
    # Training
    history = model.fit_generator(datagen,
                                  steps_per_epoch=250,
                                  epochs=NUM_EPOCHS,
                                  verbose=1,
                                  validation_data=(validation_x, validation_y))

    plt.subplot(2, 1, 1)
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='lower right')

    plt.subplot(2, 1, 2)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper right')

    plt.tight_layout()
    plt.savefig('../Images/ModelPerformance.png')
    plt.show()

    predictions = model.predict(validation_x)
    # print("First Prediction: ", predictions[0])

    score = model.evaluate(validation_x, validation_y, verbose=1)
    print('Validation loss:', score[0])
    print('Validation accuracy:', score[1])
Example #13
def main():
    parser = argparse.ArgumentParser(
        description="Train a bidirectional LSTM model for text sentiment classification")
    parser.add_argument("--train_mode",
                        type=str,
                        default='preset_param',
                        choices=['preset_param', 'kerastuner'],
                        help="Set the training mode (preset_param/kerastuner)")
    parser.add_argument("--batch_size",
                        type=int,
                        default=64,
                        help="Batch size")
    parser.add_argument("--sen_len",
                        type=int,
                        default=20,
                        help="Maximum length of a sentence")
    parser.add_argument("--lstm1",
                        type=int,
                        default=32,
                        help="Hidden dimension of first LSTM")
    parser.add_argument("--lstm2",
                        type=int,
                        default=32,
                        help="Hidden dimension of second LSTM")
    parser.add_argument("--dp_rate",
                        type=float,
                        default=0.5,
                        help="Dropout rate (percentage of droping)")
    parser.add_argument("--lr", type=float, default=1e-3, help="Learning rate")
    parser.add_argument("--epochs", type=int, default=1, help="epochs")

    parser.add_argument("--epochs_before_search",
                        type=int,
                        default=1,
                        help="epochs_before_search")
    parser.add_argument("--epochs_after_search",
                        type=int,
                        default=1,
                        help="epochs_after_search")
    parser.add_argument("--max_trials",
                        type=int,
                        default=1,
                        help="max_trials for kerastuner")
    parser.add_argument("--executions_per_trial",
                        type=int,
                        default=1,
                        help="executions_per_trial for kerastuner")

    args = parser.parse_args()

    # Setup paths
    path_prefix = Path.cwd()
    train_with_label = os.path.join(path_prefix, 'data/training_label.txt')
    train_no_label = os.path.join(path_prefix, 'data/training_nolabel.txt')
    testing_data = os.path.join(path_prefix, 'data/testing_data.txt')
    w2v_path = path_prefix.joinpath('model/w2v_all.model')

    # Configuration
    batch_size = args.batch_size
    sen_len = args.sen_len

    # Preprocess dataset
    ## Read 'training_label.txt' and 'training_nolabel.txt'
    print("loading training data ...")
    X_train_label, y_train_label = load_training_data(train_with_label)
    X_train, X_val, y_train, y_val = train_test_split(X_train_label,
                                                      y_train_label,
                                                      test_size=0.1)

    train_x_no_label = load_training_data(train_no_label)

    print(
        f"Positive rate in training dataset: {np.sum(y_train) / len(y_train)}")
    print(f"Positive rate in validation dataset: {np.sum(y_val) / len(y_val)}")

    ## Build the preprocessor
    preprocessor = Preprocess(sen_len, w2v_path=str(w2v_path))
    embedding = preprocessor.make_embedding(load=True)
    X_train_idx = preprocessor.sentences_word2idx(X_train)
    X_val_idx = preprocessor.sentences_word2idx(X_val)

    print(f"Pretrained embedding matrix shape: {embedding.shape}")

    ## Preprocess training and validation datasets
    X_train_idx_dataset = tf.data.Dataset.from_tensor_slices(X_train_idx)
    y_train_dataset = tf.data.Dataset.from_tensor_slices(y_train)
    train_dataset = tf.data.Dataset.zip((X_train_idx_dataset, y_train_dataset))

    X_val_idx_dataset = tf.data.Dataset.from_tensor_slices(X_val_idx)
    y_val_dataset = tf.data.Dataset.from_tensor_slices(y_val)
    val_dataset = tf.data.Dataset.zip((X_val_idx_dataset, y_val_dataset))

    train_dataset = train_dataset.batch(batch_size)
    val_dataset = val_dataset.batch(batch_size)

    train_dataset = train_dataset.cache().prefetch(AUTOTUNE)
    val_dataset = val_dataset.cache().prefetch(AUTOTUNE)

    # Train a bidirectional LSTM model
    train_embedding = False  # fix embedding during training

    ## Method1 - preset parameters
    if args.train_mode == 'preset_param':
        ### Build the model
        hidden_dim1 = args.lstm1
        hidden_dim2 = args.lstm2
        dp_rate = args.dp_rate
        lr = args.lr
        epochs = args.epochs

        model = buildModel(embedding, train_embedding, sen_len, hidden_dim1,
                           hidden_dim2, dp_rate, lr)

        model.summary()

        ### Train the model
        checkpoint_filepath = os.path.join(path_prefix, 'ckpt/')
        model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_filepath, save_best_only=True)
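        # with a directory path, ModelCheckpoint writes the best model in SavedModel format,
        # which tf.keras.models.load_model reloads below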

        history = model.fit(train_dataset,
                            validation_data=val_dataset,
                            epochs=epochs,
                            callbacks=[model_checkpoint_callback])

    elif args.train_mode == 'kerastuner':
        import IPython
        from kerastuner.tuners import RandomSearch

        class ClearTrainingOutput(tf.keras.callbacks.Callback):
            def on_train_end(*args, **kwargs):
                IPython.display.clear_output(wait=True)

        ### Build the model
        tuner = RandomSearch(BiLstmTuner(embedding, train_embedding, sen_len),
                             objective='val_accuracy',
                             max_trials=args.max_trials,
                             executions_per_trial=args.executions_per_trial,
                             directory=os.path.join(path_prefix, 'tuner_dir'),
                             project_name='tsc')

        ### Train the model
        tuner.search(
            train_dataset,
            epochs=args.epochs_before_search,
            validation_data=val_dataset,
            verbose=1,
            callbacks=[ClearTrainingOutput()],
        )

    # Load the best model
    print('\nload model ...')

    ## Method1
    if args.train_mode == 'preset_param':
        best_model = tf.keras.models.load_model(checkpoint_filepath)
    ## Method2
    elif args.train_mode == 'kerastuner':
        tuner.results_summary(num_trials=min(3, args.max_trials))
        best_model = tuner.get_best_models()[0]
        best_model.summary()

        # Train again with training set and validation set
        combined_dataset = train_dataset.concatenate(val_dataset)
        best_model.fit(combined_dataset, epochs=args.epochs_after_search)

    # Testing
    ## Preprocess test dataset
    print("loading testing data ...")
    X_test = load_testing_data(testing_data)
    X_test_idx = preprocessor.sentences_word2idx(X_test)

    test_dataset = tf.data.Dataset.from_tensor_slices(X_test_idx)
    test_dataset = test_dataset.batch(batch_size)
    test_dataset = test_dataset.cache().prefetch(AUTOTUNE)

    ## Predict
    outputs = testing(best_model, test_dataset)

    # Write the result to a CSV file
    tmp = pd.DataFrame({
        "id": [str(i) for i in range(len(X_test))],
        "label": outputs
    })
    print("save csv ...")
    tmp.to_csv(os.path.join(path_prefix, 'predict.csv'), index=False)
    print("Finish Predicting")
Example #14
def _run():
    rand = np.random.RandomState(42)

    layers, weights = _model.buildModel(rand, batchSize=None, sampleMode=4)

    for k, v in weights.items():
        print("Weight '{}' has shape {}".format(k, v.get_shape()))
    for x in layers:
        print("Layer '{}' has shape {}".format(x.name, x.get_shape()))

    g_x = layers[0]
    g_logits = layers[-1]
    shapeDst = tuple(g_logits.get_shape().as_list())

    g_y = tf.placeholder(tf.float32, (None, ))
    g_yRep = tf.tile(tf.reshape(g_y, shape=(-1, 1, 1, 1)),
                     multiples=(1, ) + shapeDst[1:])
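    # g_yRep replicates each scalar label across the spatial dimensions of the logits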

    # Learning rate placeholder.
    g_lr = tf.placeholder(tf.float32, ())

    # For reporting statistics.
    g_correct = tf.equal(tf.cast(tf.greater(g_logits, 0.0), tf.float32),
                         g_yRep)
    g_correct_sum = tf.reduce_sum(tf.cast(
        g_correct, tf.int32)) / (shapeDst[1] * shapeDst[2])
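    # per sample, this is the fraction of spatial positions classified correctly, summed over the batch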

    # Apply sigmoid and cross entropy.
    g_ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=g_yRep,
                                                   logits=g_logits)

    # We'll use the loss-sum when reporting statistics, and the loss-mean when training.
    g_loss_sum = tf.reduce_sum(g_ce)
    g_loss_mean = tf.reduce_mean(g_ce)

    # Use Adam with the place-holder learning rate.
    g_optimizer = tf.train.AdamOptimizer(learning_rate=g_lr)
    g_train_step = g_optimizer.minimize(g_loss_mean)

    def _eval(sess, xs, ys, batchSize=64):
        """ Returns the number of correct predictions, sum of losses, and the number of samples.
        The caller can divide the first two by the third to get averages.
        """
        assert batchSize > 0
        assert xs.shape[0] == ys.shape[0]
        num = xs.shape[0]

        acc_sum = 0
        loss_sum = 0.0
        for ii in range(0, num, batchSize):
            xsCur = xs[ii:ii + batchSize]
            ysCur = ys[ii:ii + batchSize]
            acc, loss = sess.run((g_correct_sum, g_loss_sum),
                                 feed_dict={
                                     g_x: xsCur,
                                     g_y: ysCur
                                 })
            acc_sum += acc
            loss_sum += loss
        return acc_sum, loss_sum, num

    def _trainOne(sess, xs, ys, rate, batchSize=64):
        """ Performs one training epoch. """
        assert batchSize > 0
        assert xs.shape[0] == ys.shape[0]
        num = xs.shape[0]

        # Generate a random permutation of the training data.
        indices = rand.permutation(num)

        # Run the optimizer on each batch.
        for ii in range(0, num, batchSize):
            inds = indices[ii:ii + batchSize]
            xsCur = xs[inds]
            ysCur = ys[inds]
            sess.run(g_train_step,
                     feed_dict={
                         g_x: xsCur,
                         g_y: ysCur,
                         g_lr: rate
                     })

    def _trainMulti(sess,
                    epochs,
                    xsTrain,
                    ysTrain,
                    xsValid,
                    ysValid,
                    rate,
                    batchSize=64):
        """ Performs multiple epochs, and reports statistics after each. """
        assert epochs > 0

        print("*** Starting {} epochs with batch size {} and learning rate {}".
              format(epochs, batchSize, rate))
        for i in range(epochs):
            t0 = time.time()
            _trainOne(sess, xsTrain, ysTrain, rate, batchSize)
            t1 = time.time()
            print("  Epoch {} took: {:.04f} sec".format(i, t1 - t0))

            acc, loss, num = _eval(sess, xsTrain, ysTrain, batchSize)
            print("    Training   accuracy and loss: {:.03f}, {:.03f}".format(
                acc / num, loss / num))
            acc, loss, num = _eval(sess, xsValid, ysValid, batchSize)
            print("    Validation accuracy and loss: {:.03f}, {:.03f}".format(
                acc / num, loss / num))

    featuresTrain, labelsTrain, featuresValid, labelsValid = _loadAndSplitData(
        rand, frac=0.9)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Report the initial statistics (before any epochs). Generally, the accuracy is expected to be around 1 / 2.
        acc, loss, num = _eval(sess, featuresTrain, labelsTrain)
        print("Training   accuracy and loss before: {:.03f}, {:.03f}".format(
            acc / num, loss / num))
        acc, loss, num = _eval(sess, featuresValid, labelsValid)
        print("Validation accuracy and loss before: {:.03f}, {:.03f}".format(
            acc / num, loss / num))

        batchSize = 64
        _trainMulti(sess,
                    5,
                    featuresTrain,
                    labelsTrain,
                    featuresValid,
                    labelsValid,
                    rate=0.00100,
                    batchSize=batchSize)
        _trainMulti(sess,
                    3,
                    featuresTrain,
                    labelsTrain,
                    featuresValid,
                    labelsValid,
                    rate=0.00030,
                    batchSize=batchSize)
        _trainMulti(sess,
                    3,
                    featuresTrain,
                    labelsTrain,
                    featuresValid,
                    labelsValid,
                    rate=0.00010,
                    batchSize=batchSize)
        # _trainMulti(sess, 5, featuresTrain, labelsTrain, featuresValid, labelsValid, rate=0.00005, batchSize=batchSize)

        _model.saveModelWeights(sess, weights)