Example #1
    tvars, grads, predE, predA, predB, predK, l2loss, grad_predE, \
    grad_predA, grad_predB, grad_predK, grad_l2loss, x, y_ = \
        CNN_B(im_size, out_size, L, batch_size=1, wd=0.01,
              layers=len(numfilt), numfilt=numfilt)
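    # The names above suggest that CNN_B builds the CNN and returns its trainable
    # variables and gradient placeholders, the predicted snake parameter maps
    # (predE, predA, predB, predK), an L2 weight-decay loss, and the input/label
    # placeholders x and y_ (this reading of the interface is an assumption).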

# Set up the Adam optimizer and the op that applies the CNN gradients
optimizer = tf.train.AdamOptimizer(1e-5, epsilon=1e-7)
apply_gradients = optimizer.apply_gradients(zip(grads, tvars))
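# In a TF1 session, a training step would then look roughly like
#   sess.run(apply_gradients, feed_dict={...})
# with the gradient placeholders returned by CNN_B filled from the snake
# inference defined below (sketch only; the exact feed dict is an assumption).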

###########################################################################################
# DEFINE SNAKE INFERENCE
###########################################################################################
niter = 50
print('Creating snake inference graph...', flush=True)
with tf.device('/cpu:0'):
    tf_u, tf_v, tf_du, tf_dv, tf_Du, tf_Dv, tf_u0, tf_v0, tf_du0, tf_dv0, \
    tf_alpha, tf_beta, tf_kappa = snake_graph(out_size, L,niter=niter)
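# Presumably tf_u/tf_v hold the contour node coordinates, tf_du/tf_dv and
# tf_Du/tf_Dv their update/derivative terms, tf_u0/tf_v0/tf_du0/tf_dv0 the
# initial contour, and tf_alpha/tf_beta/tf_kappa the snake energy parameters;
# niter=50 fixes the number of contour-evolution iterations built into the graph.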

###########################################################################################
# PREPARE FOLDER TO SAVE NETWORK
###########################################################################################
print('Preparing model folder...', flush=True)
start_epoch = 0
if not os.path.isdir(model_path):
    os.makedirs(model_path)

# Add ops to save and restore all the variables.
saver = tf.train.Saver()
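# Checkpointing would then be a matter of (illustrative sketch, file name assumed):
#   saver.save(sess, os.path.join(model_path, 'model'), global_step=n)
# and restoring the most recent checkpoint:
#   saver.restore(sess, tf.train.latest_checkpoint(model_path))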


###########################################################################################
# DEFINE EPOCH
Example #2
    ]
    epoch = -1
    for s in modelnames:
        epoch = max(int(s.split('-')[-1].split('.')[0]), epoch)
    start_epoch = epoch + 1
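# For example, assuming checkpoint files named like 'model-12.ckpt' and
# 'model-30.ckpt', the loop above recovers epoch = 30 and training resumes
# from start_epoch = 31 (the naming scheme is an assumption).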

# Add ops to save and restore all the variables.
saver = tf.train.Saver()

# Set up the Adam optimizer and the op that applies the CNN gradients
optimizer = tf.train.AdamOptimizer(1e-4, epsilon=1e-6)
apply_gradients = optimizer.apply_gradients(zip(grads, tvars))

with tf.device('/cpu:0'):
    tf_u, tf_v, tf_du, tf_dv, tf_Du, tf_Dv, tf_u0, tf_v0, tf_du0, tf_dv0, \
    tf_alpha, tf_beta, tf_kappa = snake_graph(out_size, L)
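# Unlike Example #1, no niter is passed here, so snake_graph presumably falls
# back to its default number of contour-evolution iterations.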


def epoch(n, i, mode):
    # mode (str): 'train' or 'test'
    batch_ind = np.arange(i, i + batch_size)
    batch = np.copy(images[:, :, :, batch_ind])
    batch_mask = np.copy(masks[:, :, :, batch_ind])
    thisGT = np.copy(GT[:, :, batch_ind[0]])
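    # batch_ind picks batch_size consecutive samples starting at i; thisGT is
    # presumably the ground-truth contour for the first sample of the batch.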
    if mode == 'train':
        # data augmentation: rotate every slice of the batch and its mask by the
        # same random angle in [0, 360) degrees
        ang = np.random.rand() * 360
        for j in range(len(batch_ind)):
            for b in range(batch.shape[2]):
                batch[:, :, b, j] = imrotate(batch[:, :, b, j], ang)
            batch_mask[:, :, 0, j] = imrotate(batch_mask[:, :, 0, j],
                                              ang,