Example #1
    def train_one_epoch(self, sess, writer, epoch, step):
        print('sub_train_one_epoch')
        sum_acc = 0
        sum_acc_iou = 0
        sum_acc_label = 0
        sum_acc_ellip = 0
        count = 0
        total_loss = 0
        t0 = time.time()
        mean_acc = 0
        mean_acc_iou = 0
        mean_acc_label = 0
        mean_acc_ellip = 0

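        # Presumably cfgs.test_accu toggles the more expensive accuracy evaluation inside calculate_acc.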
        if_epoch = cfgs.test_accu
        #if epoch % 5 == 0:
        #    if_epoch = True

        try:
            while count < self.per_e_train_batch:
                step += 1
                count += 1

                #1. train
                images_, cur_ims_, ellip_infos_, annos_, labels, filenames = sess.run(
                    [
                        self.train_images, self.train_cur_ims,
                        self.train_ellip_infos, self.train_annotations,
                        self.train_labels, self.train_filenames
                    ])

                #cv2.imwrite('%s_anno.bmp' % filenames[0], annos_[0]*255)
                #pdb.set_trace()
                cur_batch_size = images_.shape[0]
                if cur_batch_size == cfgs.batch_size:
                    coord_map_x_cur, coord_map_y_cur = self.coord_map_x, self.coord_map_y
                else:
                    coord_map_x_cur, coord_map_y_cur = self.generate_coord_map(
                        cur_batch_size)

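                # Pad a short final batch by repeating its last sample until it matches cfgs.batch_size.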
                if cur_batch_size != cfgs.batch_size:
                    #pdb.set_trace()
                    last_idx = cur_batch_size - 1
                    last_im = np.expand_dims(images_[last_idx], axis=0)
                    last_cur_im = np.expand_dims(cur_ims_[last_idx], axis=0)
                    last_ellip_info = np.expand_dims(ellip_infos_[last_idx],
                                                     axis=0)
                    last_anno = np.expand_dims(annos_[last_idx], axis=0)
                    last_label = np.expand_dims(labels[last_idx], axis=0)
                    last_fn = np.expand_dims(filenames[last_idx], axis=0)
                    for _ in range(cur_batch_size, cfgs.batch_size):
                        images_ = np.append(images_, last_im, axis=0)
                        cur_ims_ = np.append(cur_ims_, last_cur_im, axis=0)
                        ellip_infos_ = np.append(ellip_infos_,
                                                 last_ellip_info,
                                                 axis=0)
                        annos_ = np.append(annos_, last_anno, axis=0)
                        labels = np.append(labels, last_label, axis=0)
                        filenames = np.append(filenames, last_fn, axis=0)

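                # Ellipse axis statistics from columns 2+; only ellip_info_mean is fed below (the low/high feeds are commented out).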
                ellip_info_low = np.min(ellip_infos_[:, 2:], 1) / 2
                ellip_info_high = np.max(ellip_infos_[:, 2:], 1) / 2
                ellip_info_mean = np.mean(ellip_infos_[:, 2:], 1) / 4

                #generate mask ims
                mask_ims, pred_anno_, pred_seq_pro_, summary_str = sess.run(
                    [self.mask_ims, self.mask_anno, self.pro, self.summary_op],
                    feed_dict={
                        self.images: images_,
                        self.annotations: annos_,
                        self.lr: self.learning_rate,
                        self.class_labels: labels,
                        self.keep_prob: cfgs.keep_prob,
                        self.input_keep_prob: 1,
                        self.cur_batch_size: cfgs.batch_size,
                        self.coord_x_tensor: coord_map_x_cur,
                        self.coord_y_tensor: coord_map_y_cur,
                        #self.ellip_low: ellip_info_low,
                        #self.ellip_high: ellip_info_high,
                        self.ellip_axis: ellip_info_mean
                    })

                #self.im_mask_view(filenames, mask_ims, pred_seq_pro_, images_, step)
                #self.im_mask_view(filenames, pred_anno_, pred_seq_pro_, images_, step)
                #cv2.imwrite('%s_anno.bmp' % filenames[0], pred_anno_[0]*127)
                #pdb.set_trace()

                #2. calculate accuracy
                self.ellip_acc = 0
                self.calculate_acc(mask_ims.copy(),
                                   filenames,
                                   pred_anno_,
                                   pred_seq_pro_,
                                   annos_,
                                   ellip_infos_,
                                   if_epoch=if_epoch)
                #self.calculate_acc(annos_, filenames, pred_anno_, pred_seq_pro_, annos_, ellip_infos_, if_epoch=if_epoch)
                #self.calculate_acc(cur_ims_[:, 2:cfgs.ANNO_IMAGE_SIZE[0]+2, :, :], filenames, pred_anno_, pred_seq_pro_, annos_, ellip_infos_, if_epoch=if_epoch)

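                # The loss and accuracy tensors are not fetched in this run, so the per-batch metrics below stay at zero.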
                self.accu = 0
                self.accu_iou = 0
                loss = 0
                self.acc_label_ = 0
                #self.ellip_acc = 0

                sum_acc += self.accu
                sum_acc_iou += self.accu_iou
                sum_acc_label += self.acc_label_
                sum_acc_ellip += self.ellip_acc
                mean_acc = sum_acc / count
                mean_acc_iou = sum_acc_iou / count
                mean_acc_label = sum_acc_label / count
                mean_acc_ellip = sum_acc_ellip / count
                #3. calculate loss
                total_loss += loss

                #4. time consumed per batch
                time_consumed = time.time() - t0
                time_per_batch = time_consumed / count

                #5. check whether to update the learning rate
                if count % 100 == 0:
                    self.try_update_lr()
                #6. summary
                writer.add_summary(summary_str, global_step=step)

                #7. print progress
                #print('\r' + 2 * ' ', end='')
                line = 'Train epoch %2d\t lr = %g\t step = %4d\t count = %4d\t loss = %.4f\t m_loss=%.4f\t acc = %.2f%%\t iou_acc = %.2f%%\t acc_label = %.2f%%\t ellip_acc = %.2f\t time = %.2f' % (
                    epoch, self.learning_rate, step, count, loss,
                    (total_loss / count), mean_acc, mean_acc_iou,
                    mean_acc_label, mean_acc_ellip, time_per_batch)
                utils.clear_line(len(line))
                print('\r' + line, end='')
                #print('epoch %5d\t lr = %g\t step = %4d\t count = %4d\t loss = %.4f\t mean_loss=%.4f\t train_acc = %.2f%%\t train_iou_acc = %.2f%%\t train_ellip_acc = %.2f\t time = %.2f' % (epoch, self.learning_rate, step, count, loss, (total_loss/count), mean_acc, mean_acc_iou, mean_acc_ellip, time_per_batch))

            #End one epoch
            #count -= 1
            print(
                '\nepoch %5d\t learning_rate = %g\t mean_loss = %.4f\t train_acc = %.2f%%\t train_iou_acc = %.2f%%\t train_acc_label = %.2f%%\t train_ellip_acc = %.2f'
                % (epoch, self.learning_rate, (total_loss / count),
                   (sum_acc / count), (sum_acc_iou / count),
                   (sum_acc_label / count), (sum_acc_ellip / count)))
            print('Take time %3.1f' % (time.time() - t0))

        except tf.errors.OutOfRangeError:
            print('Error!')
            count -= 1
            print(
                'epoch %5d\t learning_rate = %g\t mean_loss = %.3f\t train_accuracy = %.2f%%\t train_iou_accuracy = %.2f%%'
                % (epoch, self.learning_rate, (total_loss / count),
                   (sum_acc / count), (sum_acc_iou / count)))
            print('Take time %3.1f' % (time.time() - t0))

        return step
Example #2
    def valid_once(self, sess, writer, epoch, step):

        count = 0
        sum_acc = 0
        sum_acc_iou = 0
        sum_acc_label = 0
        sum_acc_ellip = 0
        t0 = time.time()
        
        if_epoch = cfgs.test_accu
        #if epoch % 5 == 0:
        #    if_epoch = True

        try:
            total_loss = 0
            #self.per_e_valid_batch = 2
            while count < self.per_e_valid_batch:
                count += 1
                images_, cur_ims, ellip_infos_, annos_, labels, filenames = sess.run([
                    self.valid_images, self.valid_cur_ims,
                    self.valid_ellip_infos, self.valid_annotations,
                    self.valid_labels, self.valid_filenames
                ])

                cur_batch_size = images_.shape[0]
                if cur_batch_size == cfgs.batch_size:
                    coord_map_x_cur, coord_map_y_cur = self.coord_map_x, self.coord_map_y
                else:
                    coord_map_x_cur, coord_map_y_cur = self.generate_coord_map(cur_batch_size)

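                # Repeat the last sample to pad a short final batch up to cfgs.batch_size.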
                if cur_batch_size != cfgs.batch_size:
                    #pdb.set_trace()
                    last_idx = cur_batch_size - 1
                    last_im = np.expand_dims(images_[last_idx], axis=0)
                    last_cur_im = np.expand_dims(cur_ims[last_idx], axis=0)
                    last_ellip_info = np.expand_dims(ellip_infos_[last_idx], axis=0)
                    last_anno = np.expand_dims(annos_[last_idx], axis=0)
                    last_label = np.expand_dims(labels[last_idx], axis=0)
                    last_fn = np.expand_dims(filenames[last_idx], axis=0)
                    for _ in range(cur_batch_size, cfgs.batch_size):
                        images_ = np.append(images_, last_im, axis=0)
                        cur_ims = np.append(cur_ims, last_cur_im, axis=0)
                        ellip_infos_ = np.append(ellip_infos_, last_ellip_info, axis=0)
                        annos_ = np.append(annos_, last_anno, axis=0)
                        labels = np.append(labels, last_label, axis=0)
                        filenames = np.append(filenames, last_fn, axis=0)
                ellip_info_low = np.min(ellip_infos_[:, 2:], 1) / 2
                ellip_info_high = np.max(ellip_infos_[:, 2:], 1) / 2
                ellip_info_mean = np.mean(ellip_infos_[:, 2:], 1) / 4

                pred_anno, pred_seq_pro, summary_str, loss, self.accu, self.accu_iou = sess.run(
                    fetches=[
                        self.pred_annotation, self.pro, self.summary_op,
                        self.loss, self.accu_tensor, self.accu_iou_tensor
                    ],
                    #fetches=[self.pred_anno_lower, self.pro, self.summary_op, self.loss, self.accu_tensor_lower, self.accu_iou_tensor_lower, self.acc_label],
                    feed_dict={
                        self.images: images_,
                        self.annotations: annos_,
                        self.lr: self.learning_rate,
                        self.class_labels: labels,
                        self.keep_prob: 1,
                        self.input_keep_prob: 1,
                        self.cur_batch_size: cfgs.batch_size,
                        self.coord_x_tensor: coord_map_x_cur,
                        self.coord_y_tensor: coord_map_y_cur,
                        self.ellip_axis: ellip_info_mean
                    })
                
                #View result
                self.view_valid(filenames, pred_anno, pred_seq_pro, images_, step)


                writer.add_summary(summary_str, global_step=step)
                self.calculate_acc(
                    cur_ims[:, 2:cfgs.ANNO_IMAGE_SIZE[0] + 2, :, :].copy(),
                    filenames, pred_anno, pred_seq_pro, annos_, ellip_infos_,
                    True, if_epoch)
                sum_acc += self.accu
                sum_acc_iou += self.accu_iou
                sum_acc_label += 0
                sum_acc_ellip += self.ellip_acc
                total_loss += loss

                line = 'epoch %5d\t learning_rate = %g\t step = %4d\t loss = %.4f\t valid_accuracy = %.2f%%\t valid_iou_accuracy = %.2f%%\t valid_acc_label = %.2f%%\t valid_ellip_acc = %.2f' % (
                    epoch, self.learning_rate, step, (total_loss / count),
                    (sum_acc / count), (sum_acc_iou / count),
                    (sum_acc_label / count), (sum_acc_ellip / count))
                utils.clear_line(len(line))
                print('\r' + line, end='')
                #print('\r' + 12 * ' ', end='')
                #print('epoch %5d\t learning_rate = %g\t step = %4d\t loss = %.4f\t valid_accuracy = %.2f%%\t valid_iou_accuracy = %.2f%%\t valid_ellip_acc = %.2f' % (epoch, self.learning_rate, step, (total_loss/count), (sum_acc/count), (sum_acc_iou/count), (sum_acc_ellip/count)))
        
            #End valid data
            #count -= 1
            
            print(
                '\nepoch %5d\t learning_rate = %g\t loss = %.4f\t valid_accuracy = %.2f%%\t valid_iou_accuracy = %.2f%%\t valid_acc_label = %.2f%%\t valid_ellip_acc = %.2f'
                % (epoch, self.learning_rate, total_loss / count,
                   sum_acc / count, sum_acc_iou / count,
                   sum_acc_label / count, sum_acc_ellip / count))
            print('Take time %3.1f' % (time.time() - t0))


        except tf.errors.OutOfRangeError:
            print('Error!')
Example #3
    def valid_once(self, sess, writer, epoch, step):

        count = 0
        sum_acc = 0
        sum_acc_iou = 0
        sum_acc_ellip = 0
        t0 = time.time()

        if_epoch = False
        if epoch % 5 == 0:
            if_epoch = True

        try:
            total_loss = 0
            #self.per_e_valid_batch = 2
            while count < self.per_e_valid_batch:
                count += 1
                images_, cur_ims, ellip_infos_, annos_, filenames = sess.run([
                    self.valid_images, self.valid_cur_ims,
                    self.valid_ellip_infos, self.valid_annotations,
                    self.valid_filenames
                ])

                cur_batch_size = images_.shape[0]
                if cur_batch_size == cfgs.batch_size:
                    coord_map_x_cur, coord_map_y_cur = self.coord_map_x, self.coord_map_y
                else:
                    coord_map_x_cur, coord_map_y_cur = self.generate_coord_map(
                        cur_batch_size)

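                # A short final batch is skipped here instead of being padded.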
                if cur_batch_size == cfgs.batch_size:

                    ellip_info_low = np.min(ellip_infos_[:, 2:], 1) / 2
                    ellip_info_high = np.max(ellip_infos_[:, 2:], 1) / 2
                    ellip_info_mean = np.mean(ellip_infos_[:, 2:], 1) / 4

                    mask_ims, pred_anno_, pred_seq_pro_, summary_str = sess.run(
                        #fetches=[self.pred_annotation, self.pro, self.summary_op, self.loss, self.accu_tensor, self.accu_iou_tensor],
                        #fetches=[self.pred_anno_lower, self.pro, self.summary_op, self.loss, self.accu_tensor_lower, self.accu_iou_tensor_lower],
                        fetches=[
                            self.mask_ims, self.mask_anno, self.pro,
                            self.summary_op
                        ],
                        feed_dict={
                            self.images: images_,
                            self.annotations: annos_,
                            self.lr: self.learning_rate,
                            self.keep_prob: 1,
                            self.input_keep_prob: 1,
                            self.cur_batch_size: cur_batch_size,
                            self.coord_x_tensor: coord_map_x_cur,
                            self.coord_y_tensor: coord_map_y_cur,
                            self.ellip_axis: ellip_info_mean
                        })

                    #View result
                    #self.view_valid(filenames, pred_anno, pred_seq_pro, images_, step)
                    #self.im_mask_view_valid(filenames, pred_anno, pred_seq_pro, images_, step)
                    #self.im_mask_view_valid(filenames, mask_ims, pred_seq_pro_, images_, step)

                    writer.add_summary(summary_str, global_step=step)
                    self.ellip_acc = 0
                    #self.calculate_acc(annos_, filenames, pred_anno_, pred_seq_pro_, annos_, ellip_infos_, True, if_epoch)
                    self.calculate_acc(mask_ims.copy(),
                                       filenames,
                                       pred_anno_,
                                       pred_seq_pro_,
                                       annos_,
                                       ellip_infos_,
                                       if_epoch=if_epoch)
                    #self.calculate_acc(cur_ims[:, 2:cfgs.ANNO_IMAGE_SIZE[0]+2, :, :], filenames, pred_anno_, pred_seq_pro_, annos_, ellip_infos_, if_epoch=if_epoch)

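                    # Loss and accuracy tensors are not fetched above, so these stay at zero.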
                    self.accu = 0
                    self.accu_iou = 0
                    loss = 0
                    sum_acc += self.accu
                    sum_acc_iou += self.accu_iou
                    sum_acc_ellip += self.ellip_acc
                    total_loss += loss
                    #print('\r' + 12 * ' ', end='')
                    line = 'epoch %5d\t learning_rate = %g\t step = %4d\t loss = %.4f\t valid_accuracy = %.2f%%\t valid_iou_accuracy = %.2f%%\t valid_ellip_acc = %.2f' % (
                        epoch, self.learning_rate, step, (total_loss / count),
                        (sum_acc / count), (sum_acc_iou / count),
                        (sum_acc_ellip / count))
                    utils.clear_line(len(line))
                    print('\r' + line, end='')

            #End valid data
            #count -= 1
            print(
                '\nepoch %5d\t learning_rate = %g\t loss = %.4f\t valid_accuracy = %.2f%%\t valid_iou_accuracy = %.2f%%\t valid_ellip_acc = %.2f'
                % (epoch, self.learning_rate, total_loss / count, sum_acc /
                   count, sum_acc_iou / count, sum_acc_ellip / count))
            print('Take time %3.1f' % (time.time() - t0))

        except tf.errors.OutOfRangeError:
            print('Error!')
Example #4
    def valid_once(self, sess, writer, epoch, step):

        count = 0
        sum_acc = 0
        sum_acc_iou = 0
        sum_acc_ellip = 0
        t0 = time.time()

        if_epoch = cfgs.test_accu
        #if epoch % 5 == 0:
        #    if_epoch = True

        try:
            total_loss = 0
            #self.per_e_valid_batch = 2
            while count < self.per_e_valid_batch:
                count += 1
                images_, cur_ims, ellip_infos_, annos_, filenames = sess.run([
                    self.valid_images, self.valid_cur_ims,
                    self.valid_ellip_infos, self.valid_annotations,
                    self.valid_filenames
                ])

                cur_batch_size = images_.shape[0]
                if cur_batch_size == cfgs.batch_size:
                    coord_map_x_cur, coord_map_y_cur = self.coord_map_x, self.coord_map_y
                else:
                    coord_map_x_cur, coord_map_y_cur = self.generate_coord_map(
                        cur_batch_size)

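                # Pad a short final batch by repeating its last sample (no class labels in this variant).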
                if cur_batch_size != cfgs.batch_size:
                    #pdb.set_trace()
                    last_idx = cur_batch_size - 1
                    last_im = np.expand_dims(images_[last_idx], axis=0)
                    last_cur_im = np.expand_dims(cur_ims[last_idx], axis=0)
                    last_ellip_info = np.expand_dims(ellip_infos_[last_idx],
                                                     axis=0)
                    last_anno = np.expand_dims(annos_[last_idx], axis=0)
                    last_fn = np.expand_dims(filenames[last_idx], axis=0)
                    for _ in range(cur_batch_size, cfgs.batch_size):
                        images_ = np.append(images_, last_im, axis=0)
                        cur_ims = np.append(cur_ims, last_cur_im, axis=0)
                        ellip_infos_ = np.append(ellip_infos_,
                                                 last_ellip_info,
                                                 axis=0)
                        annos_ = np.append(annos_, last_anno, axis=0)
                        filenames = np.append(filenames, last_fn, axis=0)

                ellip_info_low = np.min(ellip_infos_[:, 2:], 1) / 2
                ellip_info_high = np.max(ellip_infos_[:, 2:], 1) / 2
                ellip_info_mean = np.mean(ellip_infos_[:, 2:], 1) / 4

                pred_anno, pred_seq_pro, summary_str, loss, self.accu, self.accu_iou = sess.run(
                    fetches=[
                        self.pred_annotation, self.pro, self.summary_op,
                        self.loss, self.accu_tensor, self.accu_iou_tensor
                    ],
                    #fetches=[self.pred_anno_lower, self.pro, self.summary_op, self.loss, self.accu_tensor_lower, self.accu_iou_tensor_lower],
                    feed_dict={
                        self.images: images_,
                        self.annotations: annos_,
                        self.lr: self.learning_rate,
                        self.keep_prob: 1,
                        self.input_keep_prob: 1,
                        self.cur_batch_size: cfgs.batch_size,
                        self.coord_x_tensor: coord_map_x_cur,
                        self.coord_y_tensor: coord_map_y_cur,
                        self.ellip_axis: ellip_info_mean
                    })

                #View result
                self.view_valid(filenames, pred_anno, pred_seq_pro, images_,
                                step)

                writer.add_summary(summary_str, global_step=step)

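                # Sweep cfgs.test_thresh_num binarization thresholds, keep the one with the smallest ellipse error,
                # and blend it into self.find_thresh with an exponential moving average (rate cfgs.thresh_lr).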
                thresh = cfgs.start_thresh
                min_ellip_loss = 1000
                min_thresh = thresh
                for i in range(cfgs.test_thresh_num):

                    test_pred_anno = sess.run(
                        fetches=[self.pred_anno_test_thresh],
                        feed_dict={
                            self.pro_find: pred_seq_pro,
                            self.test_thresh: thresh,
                            self.cur_batch_size: cfgs.batch_size
                        })

                    test_pred_anno = np.squeeze(np.array(test_pred_anno),
                                                axis=0)

                    self.calculate_acc(
                        cur_ims[:, 2:cfgs.ANNO_IMAGE_SIZE[0] + 2, :, :].copy(),
                        filenames, test_pred_anno, pred_seq_pro, annos_,
                        ellip_infos_, True, if_epoch)

                    if self.ellip_acc < min_ellip_loss:
                        min_ellip_loss = self.ellip_acc
                        min_thresh = thresh
                    thresh += cfgs.interval
                self.find_thresh = self.find_thresh * (
                    1 - cfgs.thresh_lr) + min_thresh * cfgs.thresh_lr

                sum_acc += self.accu
                sum_acc_iou += self.accu_iou
                sum_acc_ellip += min_ellip_loss
                mean_acc = sum_acc / count
                mean_acc_iou = sum_acc_iou / count
                mean_acc_ellip = sum_acc_ellip / count
                #3. calculate loss
                total_loss += loss

                #4. time consume
                time_consumed = time.time() - t0
                time_per_batch = time_consumed / count

                line = 'epoch %5d\t lr = %g\t count = %4d\t loss = %.4f\t mean_loss=%.4f\t valid_ellip_acc = %.2f\t choose_thresh = %.2f\t cur_thresh = %.3f\t time = %.2f' % (
                    epoch, self.learning_rate, count, loss,
                    (total_loss / count), min_ellip_loss, min_thresh,
                    self.find_thresh, time_per_batch)

                utils.clear_line(len(line))
                print('\r' + line, end='')
            #End valid data
            #count -= 1
            print(
                '\nepoch %5d\t learning_rate = %g\t loss = %.4f\t valid_accuracy = %.2f%%\t valid_iou_accuracy = %.2f%%\t valid_ellip_acc = %.2f'
                % (epoch, self.learning_rate, total_loss / count, sum_acc /
                   count, sum_acc_iou / count, sum_acc_ellip / count))
            print('Take time %3.1f' % (time.time() - t0))

        except tf.errors.OutOfRangeError:
            print('Error!')