Example #1
# (assumed imports; vgg16, random_input_generator, device and the
#  upper-case constants are defined elsewhere in the benchmark script)
import time

import torch
import torch.nn.functional as F
import torch.optim as optim

total_time = 0

# get the whole model
vgg = vgg16()

start_time = time.time()
vgg = vgg.to(device)
total_time += time.time() - start_time

# training setting
num_iter = NUM_ITERS
batch_size = BATCH_SIZE
optimizer = optim.Adam(vgg.parameters(), lr=LEARNING_RATE)

# data generator
gen = random_input_generator(num_iter, batch_size, format='NCHW')

# begin training

for idx, data in enumerate(gen):
    x_batch = torch.Tensor(data[0])
    y_batch = torch.Tensor(data[1]).long()

    start_time = time.time()

    x_batch = x_batch.to(device)
    y_batch = y_batch.to(device)

    # forward + backward
    outputs = vgg(x_batch)
    loss = F.cross_entropy(outputs, y_batch)
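    # (assumed continuation of the loop, not shown in the excerpt:
    #  backward pass, parameter update and timing accumulation)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    total_time += time.time() - start_time
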
Example #2
# (assumed imports; vgg, random_input_generator and the upper-case
#  constants are defined elsewhere in the benchmark script)
import time

import psutil
import tensorflow as tf
import tensorlayer as tl

info = psutil.virtual_memory()
monitor_interval = MONITOR_INTERVAL
avg_mem_usage = 0
max_mem_usage = 0
count = 0
total_time = 0

# training setting
num_iter = NUM_ITERS
batch_size = BATCH_SIZE
train_weights = vgg.weights
optimizer = tf.optimizers.Adam(learning_rate=LEARNING_RATE)
loss_object = tl.cost.cross_entropy

# data generator
gen = random_input_generator(num_iter, batch_size)


# training function
def train_step(x_batch, y_batch):
    # forward + backward
    with tf.GradientTape() as tape:
        ## compute outputs
        _logits = vgg(x_batch)
        ## compute loss and update model
        _loss = loss_object(_logits, y_batch)

    grad = tape.gradient(_loss, train_weights)
    optimizer.apply_gradients(zip(grad, train_weights))
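
# (a minimal sketch of the benchmark loop, assumed from the variables set up
#  above; the original excerpt stops after train_step)
for idx, data in enumerate(gen):
    start_time = time.time()
    train_step(data[0], data[1])
    total_time += time.time() - start_time

    # sample memory usage every monitor_interval iterations
    if idx % monitor_interval == 0:
        cur_mem = psutil.virtual_memory().used - info.used  # growth since start
        max_mem_usage = max(max_mem_usage, cur_mem)
        avg_mem_usage += cur_mem
        count += 1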

Example #3
# (same assumed imports and helper definitions as in Example #1)
total_time = 0

# get the whole model
vgg = vgg16()

start_time = time.time()
vgg = vgg.to(device)
total_time += time.time() - start_time

# training setting
num_iter = NUM_ITERS
batch_size = BATCH_SIZE
optimizer = optim.Adam(vgg.parameters(), lr=LEARNING_RATE)

# data generator
gen = random_input_generator(num_iter, batch_size, format='NCHW')

# begin training

for idx, data in enumerate(gen):

    start_time = time.time()

    x_batch = torch.Tensor(data[0])
    y_batch = torch.Tensor(data[1]).long()
    x_batch = x_batch.to(device)
    y_batch = y_batch.to(device)

    # forward + backward
    outputs = vgg(x_batch)
    loss = F.cross_entropy(outputs, y_batch)
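    # (assumed continuation, mirroring Example #1: backward pass, update and
    #  timing; note that start_time here also covers the host-to-device copies)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    total_time += time.time() - start_time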