def inference_resnet_own(images, training):
    # reference: https://github.com/tensorflow/models/blob/master/official/resnet/imagenet_main.py
    resnet_size = FLAGS.resnet_type
    choices = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if resnet_size < 50:
        bottleneck = False
        final_size = 512
    else:
        bottleneck = True
        final_size = 2048
    resnet = resnet_model.Model(resnet_size=resnet_size,
                                bottleneck=bottleneck,
                                num_classes=NUM_CLASSES + 1,
                                num_filters=64,
                                kernel_size=7,
                                conv_stride=2,
                                first_pool_size=3,
                                first_pool_stride=2,
                                block_sizes=choices[resnet_size],
                                block_strides=[1, 2, 2, 2],
                                final_size=final_size,
                                resnet_version=2)
    softmax_linear = resnet(images, training)
    return softmax_linear
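The snippet above reads FLAGS.resnet_type and NUM_CLASSES from module-level globals that are not shown; a minimal sketch of how they might be declared with the TF1 flags API (the values here are hypothetical, not taken from the source):

# Hypothetical globals assumed by inference_resnet_own (illustration only).
import tensorflow as tf

NUM_CLASSES = 1000  # example label count; the model head uses NUM_CLASSES + 1 outputs

tf.app.flags.DEFINE_integer(
    'resnet_type', 50, 'ResNet depth: one of 18, 34, 50, 101, 152 or 200')
FLAGS = tf.app.flags.FLAGS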
def main(_):
    # Specify which GPU to use
    # os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    cls_model = resnet_model.Model(resnet_size=50,
                                   bottleneck=True,
                                   num_classes=26,
                                   num_filters=64,
                                   kernel_size=7,
                                   conv_stride=2,
                                   first_pool_size=3,
                                   first_pool_stride=2,
                                   block_sizes=[3, 4, 6, 3],
                                   block_strides=[1, 2, 2, 2],
                                   resnet_version=resnet_model.DEFAULT_VERSION,
                                   data_format='channels_first',
                                   dtype=resnet_model.DEFAULT_DTYPE)
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None  # dims arrive as strings; '-1' means unknown
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        input_shape = [None, None, None, 3]
    exporter.export_inference_graph(FLAGS.input_type, cls_model,
                                    FLAGS.trained_checkpoint_prefix,
                                    FLAGS.output_directory, input_shape)
def inference(images, training):
    # reference: https://github.com/tensorflow/models/blob/master/official/resnet/cifar10_main.py
    resnet = resnet_model.Model(resnet_size=32,
                                bottleneck=False,
                                num_classes=NUM_CLASSES,
                                num_filters=16,
                                kernel_size=3,
                                conv_stride=1,
                                first_pool_size=None,
                                first_pool_stride=None,
                                block_sizes=[5, 5, 5],
                                block_strides=[1, 2, 2],
                                final_size=64,
                                resnet_version=2)
    softmax_linear = resnet(images, training)
    return softmax_linear
def dnn(image, training):
    """Calls the resnet model with the proper initialization for the CIFAR-10 model.

    :param image: input image tensor
    :param training: boolean, True when building the training graph (controls batch norm)
    :return: model output tensor node
    """
    resnet_object = resnet_model.Model(resnet_size=32,
                                       bottleneck=False,
                                       num_classes=10,
                                       num_filters=16,
                                       kernel_size=3,
                                       conv_stride=1,
                                       first_pool_size=None,
                                       first_pool_stride=None,
                                       block_sizes=[5, 5, 5],
                                       block_strides=[1, 2, 2],
                                       final_size=64,
                                       data_format='channels_last')
    update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_op):
        logits = resnet_object(image, training=training)
    return logits
def FCN2(image, mean, variance, phase):
    """Defines the FCN model.

    :param image: input image tensor in flattened form
    :param mean: mean used to normalize the input image
    :param variance: variance used to normalize the input image
    :param phase: boolean tensor, True when building the training graph
    :return: model output tensor node
    """
    image = tf.cast(image, tf.float32)
    image_reshape = tf.reshape(image, [-1, 512, 512, 3])
    image_norm = tf.nn.batch_normalization(image_reshape, mean, variance, None, None, 0.0001)

    model = resnet_model.Model(resnet_size=50,
                               bottleneck=False,
                               num_classes=2,
                               num_filters=64,
                               kernel_size=7,
                               conv_stride=2,
                               first_pool_size=3,
                               first_pool_stride=2,
                               block_sizes=[3, 4, 6, 3],
                               block_strides=[1, 2, 2, 2],
                               resnet_version=1,
                               data_format='channels_last')
    resnet_out = model(image_norm, phase)

    # Collect the outputs of the ResNet block layers as skip connections.
    intermediate_out = [
        v.values()[0] for v in tf.get_default_graph().get_operations()
        if 'block_layer' in v.name
    ]

    conv1 = tf.layers.conv2d(intermediate_out[3], filters=512, kernel_size=[1, 1],
                             strides=(1, 1), padding='same', activation=tf.nn.relu,
                             name='conv1_1x1')
    conv2 = tf.layers.conv2d(conv1, filters=512, kernel_size=[1, 1],
                             strides=(1, 1), padding='same', activation=tf.nn.relu,
                             name='conv2_1x1')
    up1 = tf.layers.conv2d_transpose(conv2, filters=64, kernel_size=[16, 16],
                                     strides=(8, 8), padding='same',
                                     activation=tf.nn.relu, name='upsample_1')
    up2 = tf.layers.conv2d_transpose(intermediate_out[2], filters=64, kernel_size=[8, 8],
                                     strides=(4, 4), padding='same',
                                     activation=tf.nn.relu, name='upsample_2')
    up3 = tf.layers.conv2d_transpose(intermediate_out[1], filters=64, kernel_size=[4, 4],
                                     strides=(2, 2), padding='same',
                                     activation=tf.nn.relu, name='upsample_3')
    up4 = intermediate_out[0]
    up5 = tf.layers.conv2d_transpose(up4 + up3 + up2 + up1, filters=2, kernel_size=[8, 8],
                                     strides=(4, 4), padding='same',
                                     activation=None, name='upsample_5')
    return up5
                    default='/cluster/project/infk/hilliges/lectures/mp20/project2/',
                    help='path to the dataset')
parser.add_argument('--batch_size', type=int, default=1, help='batch size')
parser.add_argument('--log_dir', type=str, default='./example',
                    help='log storage dir for tensorboard')
opt = parser.parse_args()

with tf.Session() as sess:
    # define resnet model
    sample = create_test_dataloader(data_root=opt.data_root,
                                    batch_size=opt.batch_size)
    with tf.variable_scope('model'):
        model = resnet_model.Model()
        p3d_out_norm = model(sample['image'], training=False)
    p3d_out = unnormalize_pose(p3d_out_norm)
    p3d_out = tf.reshape(p3d_out, [-1, 51])

    # restore weights
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(opt.log_dir))

    predictions = None
    with trange(math.ceil(meta_info.NUM_SAMPLES_TEST / opt.batch_size)) as t:
        for i in t:
            p3d_out_ = sess.run(p3d_out)
            if predictions is None:
                predictions = p3d_out_
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", "-i")
    args = parser.parse_args()
    data_dir = args.input_dir

    sess = tf.Session()
    model = resnet_model.Model(resnet_size=RESNET_SIZE,
                               bottleneck=False,
                               num_classes=NUM_CLASSES,
                               num_filters=64,
                               kernel_size=7,
                               conv_stride=2,
                               first_pool_size=3,
                               first_pool_stride=2,
                               block_sizes=_get_block_sizes(RESNET_SIZE),
                               block_strides=[1, 2, 2, 2],
                               resnet_version=RESNET_VERSION,
                               data_format=None,
                               dtype=DTYPE,
                               sess=sess)
    print("starting")
    dataset = data.custom_input_fn(data_dir, batch_size=BATCH_SIZE)
    iterator = dataset.make_initializable_iterator()
    it_init = iterator.initializer
    sess.run(it_init)
    outs, loss = model.network(iterator, training=TRAINING)
def resnet_model_fn(features, labels, mode, params):
    # Generate a summary node for the images
    tf.compat.v1.summary.image('images', features, max_outputs=6)

    # Check that the features/images have the data type being used for calculations.
    assert features.dtype == resnet_model.DEFAULT_DTYPE

    resnet_size = params['resnet_size']
    if resnet_size < 50:
        bottleneck = False
    else:
        bottleneck = True

    model = resnet_model.Model(resnet_size=resnet_size,
                               bottleneck=bottleneck,
                               num_classes=FLAGS.num_classes,
                               num_filters=64,
                               kernel_size=7,
                               conv_stride=2,
                               first_pool_size=3,
                               first_pool_stride=2,
                               block_sizes=_get_block_sizes(resnet_size),
                               block_strides=[1, 2, 2, 2],
                               resnet_version=resnet_model.DEFAULT_VERSION,
                               data_format='channels_first',
                               dtype=resnet_model.DEFAULT_DTYPE)

    logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)

    # This acts as a no-op if the logits are already in fp32 (provided logits are
    # not a SparseTensor). If dtype is low precision, logits must be cast to
    # fp32 for numerical stability.
    logits = tf.cast(logits, tf.float32)

    predictions = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        # Return the predictions and the specification for serving a SavedModel
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs={
                'predict': tf.estimator.export.PredictOutput(predictions)
            })

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    if FLAGS.label_smoothing != 0.0:
        one_hot_labels = tf.one_hot(labels, 1001)
        cross_entropy = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot_labels,
            label_smoothing=FLAGS.label_smoothing)
    else:
        cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
            logits=logits, labels=labels)

    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy, name='cross_entropy')
    tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)

    # The default loss_filter_fn would exclude batch_normalization variables from
    # weight decay; this implementation keeps every trainable variable.
    def loss_filter_fn(_):
        return True

    # Add weight decay to the loss.
    weight_decay = FLAGS.weight_decay
    l2_loss = weight_decay * tf.add_n(
        # loss is computed using fp32 for numerical stability.
        [
            tf.nn.l2_loss(tf.cast(v, tf.float32))
            for v in tf.compat.v1.trainable_variables()
            if loss_filter_fn(v.name)
        ])
    tf.compat.v1.summary.scalar('l2_loss', l2_loss)
    loss = cross_entropy + l2_loss

    scaffold = None
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.compat.v1.train.get_or_create_global_step()
        learning_rate = configure_learning_rate(FLAGS.decay_steps, global_step)

        # Create a tensor named learning_rate for logging purposes
        tf.identity(learning_rate, name='learning_rate')
        tf.compat.v1.summary.scalar('learning_rate', learning_rate)

        momentum = FLAGS.momentum
        if FLAGS.enable_lars:
            optimizer = tf.contrib.opt.LARSOptimizer(
                learning_rate,
                momentum=momentum,
                weight_decay=weight_decay,
                skip_list=['batch_normalization', 'bias'])
        else:
            optimizer = tf.compat.v1.train.MomentumOptimizer(
                learning_rate=learning_rate, momentum=momentum)

        # loss_scale = FLAGS.loss_scale
        # fp16_implementation = FLAGS.fp16_implementation
        # if fp16_implementation == 'graph_rewrite':
        #     optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
        #         optimizer, loss_scale=loss_scale)

        def _dense_grad_filter(gvs):
            """Only apply gradient updates to the final layer.

            This function is used for fine tuning.

            Args:
                gvs: list of tuples with gradients and variable info
            Returns:
                filtered gradients so that only the dense layer remains
            """
            return [(g, v) for g, v in gvs if 'dense' in v.name]

        # if loss_scale != 1 and fp16_implementation != 'graph_rewrite':
        #     # When computing fp16 gradients, often intermediate tensor values are
        #     # so small, they underflow to 0. To avoid this, we multiply the loss by
        #     # loss_scale to make these tensor values loss_scale times bigger.
        #     scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
        #
        #     if fine_tune:
        #         scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)
        #
        #     # Once the gradient computation is complete we can scale the gradients
        #     # back to the correct scale before passing them to the optimizer.
        #     unscaled_grad_vars = [(grad / loss_scale, var)
        #                           for grad, var in scaled_grad_vars]
        #     minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
        # else:
        grad_vars = optimizer.compute_gradients(loss)
        # if fine_tune:
        #     grad_vars = _dense_grad_filter(grad_vars)
        minimize_op = optimizer.apply_gradients(grad_vars, global_step)

        update_ops = tf.compat.v1.get_collection(
            tf.compat.v1.GraphKeys.UPDATE_OPS)
        train_op = tf.group(minimize_op, update_ops)

        # keep_checkpoint_every_n_hours = FLAGS.keep_checkpoint_every_n_hours
        # saver = tf.train.Saver(
        #     sharded=True,
        #     keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
        #     save_relative_paths=True)
        #
        # if not tf.get_collection(tf.GraphKeys.SAVERS):
        #     tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
        # scaffold = tf.train.Scaffold(saver=saver)
    else:
        train_op = None

    accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
    accuracy_top_5 = tf.compat.v1.metrics.mean(
        tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
    metrics = {'accuracy': accuracy, 'accuracy_top_5': accuracy_top_5}

    # Create a tensor named train_accuracy for logging purposes
    tf.identity(accuracy[1], name='train_accuracy')
    tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
    tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
    tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

    return tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=metrics,
                                      scaffold=scaffold)
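resnet_model_fn follows the TF1 Estimator model_fn signature, so one plausible way to drive it looks like the sketch below; the input function, model_dir path, and parameter values are assumptions, not taken from the source.

# Hedged usage sketch: wiring resnet_model_fn into a tf.estimator.Estimator.
# `imagenet_input_fn`, the model_dir path and the step count are hypothetical.
classifier = tf.estimator.Estimator(
    model_fn=resnet_model_fn,
    model_dir='/tmp/resnet_ckpts',
    params={'resnet_size': 50})  # read inside resnet_model_fn

classifier.train(input_fn=lambda: imagenet_input_fn(is_training=True),
                 max_steps=1000)
eval_results = classifier.evaluate(
    input_fn=lambda: imagenet_input_fn(is_training=False))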
with tf.Session(config=config) as sess:
    # Read data
    if opt.val_subject in TRAINING_SUBJECT:
        TRAINING_SUBJECT.remove(opt.val_subject)
    sample = create_H36_dataloader(data_root=opt.data_root,
                                   batch_size=opt.batch_size,
                                   subjects=TRAINING_SUBJECT)
    image, pose3d_gt = sample['image'], sample['pose3d']

    # Normalize pose
    pose3d_gt_norm = normalize_pose(pose3d_gt)

    # Predict pose
    with tf.variable_scope("model", reuse=False):
        model = resnet_model.Model()
        pose3d_out_norm = model(image, training=True)

    # Compare with GT
    loss = tf.losses.absolute_difference(pose3d_gt_norm, pose3d_out_norm)

    # Optimize network parameters
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # for batch norm
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate=opt.lr).minimize(loss)

    # Unnormalize pose
    pose3d_out = unnormalize_pose(pose3d_out_norm)

    # Validation graph
    if opt.val_subject != -1:
def build_graph(self, model='mnist'):
    xinit = tf.contrib.layers.xavier_initializer
    binit = tf.constant_initializer(0.0)
    relu = tf.nn.relu
    g = tf.get_default_graph()

    self.lr = tf.placeholder(tf.float32, name='lr')
    self.beta1 = tf.placeholder(tf.float32, name='beta1')
    self.eps_t = tf.placeholder(tf.float32, name='eps_t')
    self.trn_ph = tf.placeholder(tf.bool, name='train_ph')
    self.weight_decay = tf.placeholder(tf.float32, name='weight_decay')

    if model == 'mnist':
        self.x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name='input_ph')
        self.y = tf.placeholder(tf.int32, shape=(None,), name='label_ph')
        out = self.x
        out = tf.layers.conv2d(out, 32, 3, strides=2, activation=relu, padding='same',
                               kernel_initializer=xinit(), bias_initializer=binit)
        out = tf.layers.conv2d(out, 64, 3, strides=2, activation=relu, padding='same',
                               kernel_initializer=xinit(), bias_initializer=binit)
        out = tf.layers.conv2d(out, 128, 3, strides=2, activation=relu, padding='same',
                               kernel_initializer=xinit(), bias_initializer=binit)
        out = tf.reshape(out, [-1, np.prod(out.get_shape().as_list()[1:])])
        out = tf.layers.dropout(out, rate=0.5, training=self.trn_ph)
        logits = tf.layers.dense(out, 10, kernel_initializer=xinit(), bias_initializer=binit)
        self.pred = tf.argmax(logits, axis=1, name='pred_op')
    elif model == 'mnist_binary':
        self.x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name='input_ph')
        self.y = tf.placeholder(tf.int32, shape=(None,), name='label_ph')
        out = self.x
        out = tf.layers.conv2d(out, 32, 3, strides=2, activation=relu, padding='same',
                               kernel_initializer=xinit(), bias_initializer=binit)
        out = tf.layers.conv2d(out, 64, 3, strides=2, activation=relu, padding='same',
                               kernel_initializer=xinit(), bias_initializer=binit)
        out = tf.layers.conv2d(out, 128, 3, strides=2, activation=relu, padding='same',
                               kernel_initializer=xinit(), bias_initializer=binit)
        out = tf.reshape(out, [-1, np.prod(out.get_shape().as_list()[1:])])
        out = tf.layers.dropout(out, rate=0.5, training=self.trn_ph)
        logits = tf.layers.dense(out, 2, kernel_initializer=xinit(), bias_initializer=binit)
        self.pred = tf.argmax(logits, axis=1, name='pred_op')
    elif model == 'cifar10':
        self.x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name='input_ph')
        self.y = tf.placeholder(tf.int32, shape=(None,), name='label_ph')
        resnet_size = 20
        num_blocks = (resnet_size - 2) // 6
        conv = resnet_model.Model(resnet_size=resnet_size,
                                  bottleneck=False,
                                  num_classes=10,
                                  num_filters=16,
                                  kernel_size=3,
                                  conv_stride=1,
                                  first_pool_size=None,
                                  first_pool_stride=None,
                                  block_sizes=[num_blocks] * 3,
                                  block_strides=[1, 2, 2],
                                  resnet_version=resnet_model.DEFAULT_VERSION,
                                  data_format='channels_last')
        logits = conv(self.x, self.trn_ph)
        self.pred = tf.argmax(logits, axis=1, name='pred_op')
    elif model == 'cifar100':
        self.x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name='input_ph')
        self.y = tf.placeholder(tf.int32, shape=(None,), name='label_ph')
        resnet_size = 20
        num_blocks = (resnet_size - 2) // 6
        conv = resnet_model.Model(resnet_size=resnet_size,
                                  bottleneck=False,
                                  num_classes=100,
                                  num_filters=16,
                                  kernel_size=3,
                                  conv_stride=1,
                                  first_pool_size=None,
                                  first_pool_stride=None,
                                  block_sizes=[num_blocks] * 3,
                                  block_strides=[1, 2, 2],
                                  resnet_version=resnet_model.DEFAULT_VERSION,
                                  data_format='channels_last')
        logits = conv(self.x, self.trn_ph)
        self.pred = tf.argmax(logits, axis=1, name='pred_op')

    loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=self.y)

    # Add weight decay to the loss.
    def exclude_batch_norm(name):
        return 'BatchNorm' not in name

    loss_filter_fn = exclude_batch_norm
    l2_loss = self.weight_decay * tf.add_n([
        tf.nn.l2_loss(tf.cast(v, tf.float32))
        for v in tf.trainable_variables() if loss_filter_fn(v.name)
    ])
    loss += l2_loss

    # optimizer update
    opt = self.default_hparams()['opt']
    if opt.lower() == 'adam':
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr,
                                          beta1=tf.reshape(self.beta1, []))
    elif opt.lower() == 'momentum':
        self.opt = tf.train.MomentumOptimizer(learning_rate=self.lr,
                                              momentum=tf.reshape(self.beta1, []))
    self.opt_op = self.opt.minimize(loss)

    # langevin updates
    grads, varlist = list(zip(*self.opt.compute_gradients(loss)))
    grads = [
        g + tf.random_normal(g.get_shape().as_list(), mean=0.0, stddev=self.eps_t)
        for g in grads
    ]
    self.lang_op = self.opt.apply_gradients(list(zip(grads, varlist)))
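Because build_graph exposes every hyperparameter as a placeholder, running either update amounts to feeding those placeholders; a minimal sketch follows, where net, sess, the batch tensors, and the numeric values are all assumptions.

# Hedged sketch of one training step against the graph built above.
# `net`, `sess`, `batch_x` and `batch_y` are hypothetical, as are the values.
feed = {net.x: batch_x, net.y: batch_y,
        net.lr: 1e-3, net.beta1: 0.9,
        net.weight_decay: 5e-4,
        net.eps_t: 1e-4,      # stddev of the noise injected by lang_op
        net.trn_ph: True}
sess.run(net.opt_op, feed_dict=feed)   # plain optimizer step
sess.run(net.lang_op, feed_dict=feed)  # langevin (noisy-gradient) step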
def tower_loss(scope, images, labels, is_training, human_to_label):
    """Calculate the total loss on a single tower running the model.

    Args:
        scope: unique prefix string identifying the tower, e.g. 'tower_0'
        images: Images. 4D tensor of shape [batch_size, height, width, 3].
        labels: Labels. 2D tensor of shape [batch_size, num_classes].
        is_training: boolean tensor, True when building the training graph.
        human_to_label: dict mapping human-readable label names to label indices.
    Returns:
        Tensor of shape [] containing the total loss for a batch of data
    """
    # TODO scope? weight decay? validation_test? embedding? accuracy? saver? restored?
    # TODO Calculate loss, which includes softmax cross entropy and L2 regularization.

    # Build inference Graph.
    model = resnet_model.Model(resnet_size=18,
                               bottleneck=False,
                               num_classes=data_utils.NUM_CLASSES,
                               num_filters=64,
                               kernel_size=7,
                               conv_stride=2,
                               first_pool_size=3,
                               first_pool_stride=2,
                               block_sizes=_get_block_sizes(18),
                               block_strides=[1, 2, 2, 2],
                               resnet_version=resnet_model.DEFAULT_VERSION,
                               data_format='channels_last',
                               dtype=tf.float32)
    logits = model(images, training=is_training)

    # weights = tf.multiply(tf.cast(labels, tf.float32), 1000.0) + 1.0
    # example: labels[0,0,1,1,0] -> weights[1,1,7179,7179,1]
    cross_entropy = tf.losses.sigmoid_cross_entropy(logits=logits,
                                                    multi_class_labels=labels,
                                                    weights=1.0,
                                                    label_smoothing=0.0)

    # log summary of single gpu on tensorboard
    if 'tower_0' in scope:
        tf.summary.histogram('logits', logits, collections=LOG_COLLECTIONS)
        tf.summary.scalar('loss_cross_entropy', cross_entropy,
                          collections=LOG_COLLECTIONS, family='iic')

        logits_round = tf.cast(tf.round(tf.sigmoid(logits)), tf.int32)
        CM = tf_utils.Confusion_Matrix(logits_round, labels)
        tf.summary.scalar('MEAN/f2_score', CM.f2_score,
                          collections=LOG_COLLECTIONS, family='iic')
        tf.summary.scalar('MEAN/precision', CM.precision,
                          collections=LOG_COLLECTIONS, family='iic')
        tf.summary.scalar('MEAN/recall', CM.recall,
                          collections=LOG_COLLECTIONS, family='iic')

        for label_name in TARGET_LABELS:
            label_int = human_to_label[label_name]
            CM_target = tf_utils.Confusion_Matrix(logits_round[:, label_int],
                                                  labels[:, label_int])
            tf.summary.scalar('{}/f2_score'.format(label_name), CM_target.f2_score,
                              collections=LOG_COLLECTIONS, family=label_name)
            tf.summary.scalar('{}/precision'.format(label_name), CM_target.precision,
                              collections=LOG_COLLECTIONS, family=label_name)
            tf.summary.scalar('{}/recall'.format(label_name), CM_target.recall,
                              collections=LOG_COLLECTIONS, family=label_name)

    # # Assemble all of the losses for the current tower only.
    # losses = tf.get_collection('losses', scope)
    # # Calculate the total loss for the current tower.
    # total_loss = tf.add_n(losses, name='total_loss')
    # # Attach a scalar summary to all individual losses and the total loss; do the
    # # same for the averaged version of the losses.
    # for l in losses + [total_loss]:
    #     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    #     # session. This helps the clarity of presentation on tensorboard.
    #     loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
    #     tf.summary.scalar(loss_name, l)

    return cross_entropy
def eval():
    LABEL_TO_CLASS_PATH = '../inputs/label_to_class.json'
    with open(LABEL_TO_CLASS_PATH, 'r') as infile:
        label_class_mapping = json.load(infile)

    with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
        data_dict = data_generator()
        batch_filenames = data_dict['name']
        batch_images = data_dict['image']

        # Build the model on a single GPU tower.
        is_training = tf.placeholder(tf.bool)
        with tf.variable_scope(tf.get_variable_scope()):
            with tf.device('/gpu:0'):
                model = resnet_model.Model(resnet_size=18,
                                           bottleneck=False,
                                           num_classes=data_utils.NUM_CLASSES,
                                           num_filters=64,
                                           kernel_size=7,
                                           conv_stride=2,
                                           first_pool_size=3,
                                           first_pool_stride=2,
                                           block_sizes=_get_block_sizes(18),
                                           block_strides=[1, 2, 2, 2],
                                           resnet_version=resnet_model.DEFAULT_VERSION,
                                           data_format='channels_last',
                                           dtype=tf.float32)
                logits = model(batch_images, training=is_training)
                logits_round = tf.cast(tf.round(tf.sigmoid(logits)), tf.int32)

        saver = tf.train.Saver()
        init = tf.global_variables_initializer()
        init_local = tf.local_variables_initializer()

        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            sess.run([init, init_local])
            # sess.run(iter_init)
            if FLAGS.restore_path is not None:
                saver.restore(sess, FLAGS.restore_path)
                print('successfully restored model from checkpoint: %s' %
                      (FLAGS.restore_path))

            # Create a coordinator and run all QueueRunner objects
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            predicted = []
            start_time = time.time()
            while True:
                try:
                    filenames, predictions = sess.run([batch_filenames, logits_round],
                                                      feed_dict={is_training: False})
                    conds = np.not_equal(predictions, 0)
                    code_result = []
                    for cond in conds:
                        results = np.where(cond)
                        results = list(
                            map(lambda x: label_class_mapping['label_to_code'][str(x)],
                                results[0]))
                        str_result = ''
                        for res in results:
                            str_result = str_result + ' ' + res
                        code_result.append(str_result)
                    filenames = list(map(lambda x: os.path.split(x)[1][:-4], filenames))
                    for fname, result in zip(filenames, code_result):
                        predicted.append({
                            'image_id': fname.decode('utf-8'),
                            'labels': result
                        })
                    print(len(predicted))
                except tf.errors.OutOfRangeError:
                    duration = time.time() - start_time
                    print('OutOfRangeError, and time cost: {}'.format(duration))
                    submission = pd.read_csv('../labels/stage_1_sample_submission.csv',
                                             index_col='image_id')
                    tuning_labels = pd.read_csv('../labels/tuning_labels.csv',
                                                names=['id', 'labels'],
                                                index_col=['id'])
                    predicted_df = pd.DataFrame.from_dict(predicted, orient='columns')
                    predicted_df = predicted_df.set_index('image_id')
                    submission['labels'] = None
                    submission.update(predicted_df)
                    submission.update(tuning_labels)
                    submission.to_csv(FLAGS.result_path)
                    break

            # Stop the threads
            coord.request_stop()
            # Wait for threads to stop
            coord.join(threads)