Example #1
def tower_loss(scope, embeddings, labels, is_train):
    """Calculate the total loss on a single tower running the model.

    params:
       scope: unique prefix string identifying the tower, e.g. 'tower_0'
       embeddings(float32): embeddings of each word in a sentence, of shape
                            [batch_size, max_words, emb_size]
       labels: one-hot labels; 2D tensor of shape [batch_size, num_classes]
       is_train: a bool tensor toggled between training and evaluation
    """
    is_train = tf.Print(is_train, [is_train], 'Value of is_train is: ')

    # compute logits from the word embeddings using 1d cnns over n-grams
    logits = model.model_cnn(embeddings, ngrams)
    print('logits/labels shapes:', logits.get_shape(), labels.get_shape())

    # Build the portion of the Graph calculating the losses; the total loss
    # is assembled from the 'losses' collection below.
    loss, accuracy = model.compute_cross_entropy_loss(logits, labels)

    # Assemble all of the losses and accuracies for the current tower only.
    losses = tf.get_collection('losses', scope)
    accuracies = tf.get_collection('accuracy', scope)

    # Calculate the total loss and accuracy for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')
    total_accuracy = tf.add_n(accuracies, name='total_accuracy')

    # Attach a scalar summary to all individual losses and the total loss;
    # do the same for the averaged version of the losses.
    # for l in losses + [total_loss]:
    #   # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
    #   # training session. This helps the clarity of presentation
    #   # on tensorboard.
    #   loss_name = re.sub('%s_[0-9]*/' % 'Tower', '', l.op.name)
    #   tf.summary.scalar(loss_name, l)

    return total_loss, total_accuracy
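
How a tower-loss function like this is usually consumed: one call per GPU inside a variable-reusing loop. A minimal sketch, assuming a num_gpus variable and the same embeddings/labels/is_train tensors as above; the loop itself is not part of the original snippet:

tower_losses = []
for i in range(num_gpus):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('tower_%d' % i) as scope:
            # Each tower computes its own loss over the shared variables.
            loss_i, acc_i = tower_loss(scope, embeddings, labels, is_train)
            tower_losses.append(loss_i)
            # Reuse the model variables for the next tower.
            tf.get_variable_scope().reuse_variables()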
Example #2
def train_cnn():
    #model = load_model("data/models/cnn_double_chkpt.h5")
    model = models.model_cnn()
    model.summary()

    tensorboard = TensorBoard(log_dir='./cnn1_graph', histogram_freq=0, write_graph=True, write_images=True)
    checkpoint = ModelCheckpoint("data/models/cnn_chkpt.h5", monitor='val_loss', save_best_only=True, verbose=1, mode="min")
    stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=1, verbose=1)

    model.fit_generator(batcher.train_gen(),
                        steps_per_epoch=batcher.train_steps,
                        validation_data=batcher.validation_gen(),
                        validation_steps=batcher.validation_steps,
                        epochs=8,
                        callbacks=[tensorboard, checkpoint, stopping])

    model.save("data/models/cnn1.h5")
    batcher.reset()
    model = load_model("data/models/cnn_chkpt.h5")
    scores = model.evaluate_generator(batcher.validation_gen(), steps=batcher.validation_steps)
    print("CNN", scores)

    batcher.reset()
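
The batcher object this snippet relies on is never shown. A minimal sketch of the interface it assumes; the class name, in-memory storage, and batch logic are illustrative, not the original implementation:

import numpy as np

class Batcher:
    """Hypothetical in-memory batcher matching the calls above."""
    def __init__(self, x_train, y_train, x_val, y_val, batch_size=32):
        self.x_train, self.y_train = x_train, y_train
        self.x_val, self.y_val = x_val, y_val
        self.batch_size = batch_size
        self.train_steps = len(x_train) // batch_size
        self.validation_steps = len(x_val) // batch_size

    def _gen(self, x, y):
        while True:  # Keras generators are expected to loop forever
            idx = np.random.permutation(len(x))
            for s in range(0, len(x) - self.batch_size + 1, self.batch_size):
                batch = idx[s:s + self.batch_size]
                yield x[batch], y[batch]

    def train_gen(self):
        return self._gen(self.x_train, self.y_train)

    def validation_gen(self):
        return self._gen(self.x_val, self.y_val)

    def reset(self):
        pass  # stateless here; a file-backed batcher would rewind its readers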
Example #3
File: train.py Project: GeorgeLjc/MNIST
parser.add_argument('--epoch',
                    type=int,
                    default=1,
                    metavar='EP',
                    help='training epoch (default: 1)')
args = parser.parse_args()

# set the number of training epochs
epoch = args.epoch

# check whether a GPU is available
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")

# create the model and put it in training mode
model = model_cnn().to(device)
model.train()

# create training dataset
training_dataset = MNIST(train_data, train_target)

# create validation dataset
validation_dataset = MNIST(validation_data, validation_target)

# create train loader
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(training_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)
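
The snippet ends after building the loader. A minimal training loop consistent with it might look as follows; the optimizer, learning rate, and loss function are assumptions, and model is assumed to return raw logits:

import torch.nn.functional as F
import torch.optim as optim

# Assumed optimizer and loss; the original file does not show them here.
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

for ep in range(epoch):
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)  # logits of shape [batch, 10]
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
    print('epoch %d finished, last batch loss: %.4f' % (ep, loss.item()))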
Example #4
import numpy as np
import pandas as pd
import torch

# read test data from the csv file
test_data = pd.read_csv('test.csv')
test_data = np.array(test_data.values)
test_size = test_data.shape[0]
# dummy labels: the test set has no ground truth
test_label = np.array([[0] for i in range(test_size)])
test_id = np.array(range(1, test_size + 1))

# set parameters
# check whether a GPU is available
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")

# create model
model = model_cnn()
if cuda:
    model = model.to(device)

# load the trained weights and switch to evaluation mode
model.load_state_dict(torch.load('params.pkl'))
model.eval()

# create test dataset
testing_dataset = MNIST(test_data, test_label)

# create test loader
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
test_loader = torch.utils.data.DataLoader(testing_dataset,
                                          batch_size=1024,
                                          shuffle=False,
                                          **kwargs)
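
The snippet stops before the inference loop. A sketch of how test_loader, model, and test_id could be combined into a Kaggle-style submission file; the file name and column headers are assumptions:

import csv

predictions = []
with torch.no_grad():  # no gradients needed at inference time
    for data, _ in test_loader:
        data = data.to(device)
        output = model(data)
        predictions.extend(output.argmax(dim=1).cpu().tolist())

# 'submission.csv' and the column names are assumed, not from the source.
with open('submission.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['ImageId', 'Label'])
    writer.writerows(zip(test_id, predictions))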
Example #5
checkpoints = ModelCheckpoint(checkpoint_path,  # path argument elided in the source
                              monitor='val_acc',
                              save_best_only=True,
                              mode='auto')

reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.2,
                              patience=10,
                              verbose=0,
                              mode='auto',
                              min_delta=0.0001,
                              cooldown=0,
                              min_lr=0.001)

BATCH = PARAM.batch_size
EPOCHS = PARAM.epochs

## define model
models = model_cnn()
models.compile(optimizer='adam',
               loss='categorical_crossentropy',
               metrics=['accuracy'])
# print(models.summary())
## train the model for EPOCHS epochs
train = models.fit_generator(train_datagen.flow(trainX, trainY,
                                                batch_size=BATCH),
                             steps_per_epoch=trainX.shape[0] // (4 * BATCH),
                             epochs=EPOCHS,
                             validation_data=test_datagen.flow(testX, testY),
                             validation_steps=BATCH,
                             callbacks=[checkpoints, reduce_lr])
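
train_datagen and test_datagen are used above but never defined in the snippet. A plausible setup with keras.preprocessing.image.ImageDataGenerator; the augmentation parameters are illustrative, not the original configuration:

from keras.preprocessing.image import ImageDataGenerator

# Illustrative augmentation values; the original settings are not shown.
train_datagen = ImageDataGenerator(rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)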
Example #6
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
import time

files_train = 0
files_validation = 0

batch_size = 32
epochs = 15
num_classes = 2

# global config
nb_train_samples, nb_validation_samples, train_generator, validation_generator = dataset.get_data()


# build model
model_final = model.model_cnn((48, 48, 3), num_classes)

# model_final.load_weights("car1.h5", by_name=True)
# Save the model according to the conditions
checkpoint = ModelCheckpoint("car1_2.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)

early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')

tensor_board = TensorBoard(log_dir="logs/UFPR04/Rainy", histogram_freq=0, batch_size=32, update_freq='batch')


# Start training! (the call is truncated in the source; the arguments below
# are assumed from the variables defined above)
history_object = model_final.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[checkpoint, early, tensor_board])
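
dataset.get_data() is external to this snippet. A minimal sketch of a function returning the four values it is unpacked into, assuming directory-based image data and flow_from_directory; the paths and settings are assumptions:

from keras.preprocessing.image import ImageDataGenerator

def get_data(train_dir='data/train', val_dir='data/validation',
             target_size=(48, 48), batch_size=32):
    """Hypothetical implementation; the directory layout is an assumption."""
    gen = ImageDataGenerator(rescale=1. / 255)
    train_generator = gen.flow_from_directory(
        train_dir, target_size=target_size, batch_size=batch_size,
        class_mode='categorical')
    validation_generator = gen.flow_from_directory(
        val_dir, target_size=target_size, batch_size=batch_size,
        class_mode='categorical')
    return (train_generator.samples, validation_generator.samples,
            train_generator, validation_generator)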