Example #1
def main():

    ########### Read the configuration file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()


    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    val_batch_size = int(ch.config["model"]["val_batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])
    log_interval = int(ch.config["log"]["log_interval"])


    ########### Build the data loaders ##########
    data_loader = dataset.MyDataset(train_batch_size, val_batch_size)

    data_loader_train = data_loader.load_train_data()
    data_loader_test = data_loader.load_test_data()


    ########### Train and evaluate ##########
    train.train_and_test(num_epochs, learning_rate, class_size, data_loader_train, data_loader_test, log_interval).train_epoch()
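
config.ConfigHandler is not included in this example; a minimal sketch compatible with the ch.config["model"]["..."] lookups above, assuming a standard INI file read with configparser, could be:

import configparser

class ConfigHandler:
    """Hypothetical minimal stand-in for config.ConfigHandler used above."""

    def __init__(self, path):
        self.path = path
        self.config = configparser.ConfigParser()

    def load_config(self):
        # Expects sections such as [model] and [log] holding the keys read in main().
        self.config.read(self.path, encoding="utf-8")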
Example #2
def load_data(image_dir, label_dir):
    # Prepare dataset
    # Data loader that yields mini-batches for testing (no shuffling)

    data = dataset.MyDataset(image_dir=image_dir, label_dir=label_dir, transform=True)

    loader = Data.DataLoader(dataset=data, batch_size=BATCH_SIZE, shuffle=False)

    return loader
Example #3
def load_data(image_dir, label_dir):
    '''
    Builds a data loader that yields mini-batches for the training/validation/test set.
    '''

    data = dataset.MyDataset(image_dir=image_dir,
                             label_dir=label_dir,
                             transform=True)

    loader = Data.DataLoader(dataset=data,
                             batch_size=opt.batch_size,
                             shuffle=True)

    return loader
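
dataset.MyDataset is not included in these snippets; a minimal sketch with the same (image_dir, label_dir, transform) constructor, assuming images and label tensors are paired by file name, might look like:

import os

import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms


class MyDataset(Dataset):
    """Hypothetical sketch: pairs each image with a label tensor of the same stem."""

    def __init__(self, image_dir, label_dir, transform=False):
        self.image_dir = image_dir
        self.label_dir = label_dir
        self.names = sorted(os.listdir(image_dir))
        self.transform = transforms.ToTensor() if transform else None

    def __len__(self):
        return len(self.names)

    def __getitem__(self, idx):
        name = self.names[idx]
        image = Image.open(os.path.join(self.image_dir, name)).convert('RGB')
        stem = os.path.splitext(name)[0]
        label = torch.load(os.path.join(self.label_dir, stem + '.pt'))  # assumed label format
        if self.transform is not None:
            image = self.transform(image)
        return image, label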
Example #4
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled = True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, ntu_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  utils.load(model, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()

  validset = dataset.MyDataset('/media/lab540/79eff75a-f78c-42f2-8902-9358e88bf654/lab540/Neura_auto_search/datasets/kinetics_convert/test.txt',
                               transform = transform.ToTensor())
  valid_queue = torch.utils.data.DataLoader(validset, batch_size=args.batch_size, shuffle=False, num_workers=1)

  # _, test_transform = utils._data_transforms_cifar10(args)
  # test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
  #
  # test_queue = torch.utils.data.DataLoader(
  #     test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  model.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(valid_queue, model, criterion)
  logging.info('test_acc %f', test_acc)
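
infer() is defined elsewhere in this script; a minimal sketch of a DARTS-style evaluation loop matching the call above (assuming valid_queue yields (input, target) batches and that Network may also return auxiliary logits) is:

import torch

def infer(valid_queue, model, criterion):
  # Sketch: returns (accuracy in percent, mean loss) over the validation queue.
  model.eval()
  total_loss, correct, count = 0.0, 0, 0
  with torch.no_grad():
    for x, y in valid_queue:
      x, y = x.cuda(), y.cuda()
      logits = model(x)
      if isinstance(logits, tuple):  # some Network variants return (logits, logits_aux)
        logits = logits[0]
      loss = criterion(logits, y)
      total_loss += loss.item() * y.size(0)
      correct += (logits.argmax(dim=1) == y).sum().item()
      count += y.size(0)
  return 100.0 * correct / count, total_loss / count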
Example #5
def loss_fn(output, target, alpha):
    # output: raw prediction of shape (N, C, H, W); alpha weights the object vs. no-object terms.
    output = output.permute(0, 2, 3, 1)
    output = output.reshape(output.size(0), output.size(1), output.size(2), 3,
                            -1)

    mask_obj = target[..., 0] > 0
    mask_noobj = target[..., 0] == 0

    loss_obj = torch.mean((output[mask_obj] - target[mask_obj])**2)
    # For no-object cells only the confidence channel (index 0) contributes to the loss.
    loss_noobj = torch.mean((output[mask_noobj][:, 0] - target[mask_noobj][:, 0])**2)
    loss = alpha * loss_obj + (1 - alpha) * loss_noobj
    return loss


if __name__ == '__main__':

    myDataset = dataset.MyDataset()
    train_loader = torch.utils.data.DataLoader(myDataset,
                                               batch_size=2,
                                               shuffle=True)

    net = Darknet53()
    net.train()

    opt = torch.optim.Adam(net.parameters())

    for target_13, target_26, target_52, img_data in train_loader:
        output_13, output_26, output_52 = net(img_data)
        loss_13 = loss_fn(output_13, target_13, 0.9)
        loss_26 = loss_fn(output_26, target_26, 0.9)
        loss_52 = loss_fn(output_52, target_52, 0.9)
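
The snippet stops mid-loop; a typical continuation (an assumption, not part of the example) would sum the three scale losses and take an optimizer step:

        loss = loss_13 + loss_26 + loss_52  # combine losses from the three detection scales
        opt.zero_grad()
        loss.backward()
        opt.step()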
Example #6
def main():

  train_dir = r'F:\Lab408\jinzhengu\root\shuffled_divided\train'
  eval_dir = r'F:\Lab408\jinzhengu\root\shuffled_divided\eval'
  log_dir = r'F:\Lab408\jinzhengu\root\monitored_sess_log'
  ckpt_dir = path.join(log_dir, 'ckpts')
  # model_dir = r'F:\Lab408\jinzhengu\root\monitored_sess_log'
  
  eval_interval = 20
  
  save_summary_steps = 5
  save_ckpts_steps = 100
  train_batchsz = 20
  eval_batchsz = 50
  # eval_steps = 40
  epoch = 900
  img_num = 870 * 2
  max_steps = (img_num * epoch) // train_batchsz


  # ------------------------------ prepare input ------------------------------
  dset = dataset.MyDataset(train_dir, eval_dir)
  prefetch_batch = None
  iter_dict = {
    'train': dset.train(train_batchsz, prefetch_batch),
    'eval': dset.eval(eval_batchsz, prefetch_batch)
  }
  # train_iter = dset.train(train_batchsz, prefetch_batch)
  # eval_iter = dset.eval(eval_batchsz, prefetch_batch)
  # dict_tsr_handle = {
    # 'train': train_iter.string_handle(),
    # 'eval': eval_iter.string_handle()
  # }

  holder_handle = tf.placeholder(tf.string, [])
  iter = tf.data.Iterator.from_string_handle(
      holder_handle, iter_dict['train'].output_types)
  # next_elem = iter.get_next()
  inputx, labels, filename = iter.get_next()
  inputx = tf.reshape(inputx, [-1, 200, 250, 3])
  inputx = tf.transpose(inputx, [0, 3, 1, 2]) # nchw
  # eval_x.set_shape([eval_batchsz, 200, 250, 3])

  # train_x, train_y, train_fname = dset.train(train_batchsz, prefetch_batch)
  # train_x.set_shape([train_batchsz, 200, 250, 3])
  # eval_x, eval_y, eval_fname = dset.eval(eval_batchsz, prefetch_batch)
  # eval_x.set_shape([eval_batchsz, 200, 250, 3])

  # \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ build graph \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
  model = smodel.Simplest('NCHW')
  # model = smodel.Simplest('NHWC')
  logits = model(inputx)
  with tf.name_scope('cross_entropy'):
    loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
    
  with tf.name_scope('accuracy'):
    _pred = tf.argmax(logits, axis=1, output_type=tf.int32)
    acc_vec = tf.equal(labels, _pred)
    acc = tf.reduce_mean(tf.cast(acc_vec, tf.float32))
    
  with tf.name_scope('optimizer'):
    optimizer = tf.train.AdamOptimizer(1e-4)
    train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())
  

  # ||||||||||||||||||||||||||||||  hooks ||||||||||||||||||||||||||||||
  # >>>  logging
  tf.logging.set_verbosity(tf.logging.INFO)
  # global_step = tf.train.get_or_create_global_step()
  # tf.identity(global_step, 'g_step')
  # tf.identity(loss, 'cross_entropy')
  # tf.identity(acc, 'accuracy')
  # tensor_lr = optimizer._lr_t

  tensors = {
    'step': tf.train.get_or_create_global_step().name,
    'loss': loss.name,
    'accuracy': acc.name
  }
  logging_hook = tf.train.LoggingTensorHook(
    tensors= tensors,
    every_n_iter=10
  )

  # >>>  summary
  summary_conf = {
    'dir': log_dir,
    'saved_steps': save_summary_steps
  }
  summary_protobuf = {
    'loss': tf.summary.scalar('cross_entropy', loss),
    'accuracy': tf.summary.scalar('accuracy', acc)
  }
  # print(list(summary_protobuf.values()))
  # summary_loss = tf.summary.scalar('cross_entropy', loss)
  # summary_acc = tf.summary.scalar('accuracy', acc)
  # summary_lr = tf.summary.scalar('lr', optimizer._lr_t)
  # global_step = 
  # merged_op = tf.summary.merge_all()
  # summary_hook = tf.train.SummarySaverHook(
  #   save_steps=1,
  #   output_dir= ckpt_dir,
  #   summary_op= merged_op
  # )
  
  # >>> main run hook
  eval_hook = runhooks.RunHook(
    iter_dict= iter_dict,
    eval_steps= eval_interval,
    train_op= train_op,
    training= model.training,
    holder_handle= holder_handle,
    summary_conf= summary_conf,
    summary_protobuf= summary_protobuf,
  )

  # >>>  checkpoint saver
  ckpt_saver_hook = runhooks.CkptSaverHook(
    ckpt_dir,
    save_steps= save_ckpts_steps
  )
  # ckpt_saver_hook = tf.train.CheckpointSaverHook(
  #   checkpoint_dir= ckpt_dir,
  #   save_steps= save_ckpts_steps,
  # )

  all_hooks = [
      # logging_hook,
      # summary_hook,
      eval_hook,
      ckpt_saver_hook,
      # tf.train.StopAtStepHook(max_steps),
      # tf.train.NanTensorHook(loss)
  ]

  # ////////////////////////////// session config //////////////////////////////
  sess_conf = tf.ConfigProto()
  sess_conf.gpu_options.allow_growth = True
  sess_conf.gpu_options.per_process_gpu_memory_fraction = 0.75
  
  sess_creator = tf.train.ChiefSessionCreator(
      # scaffold=scaffold,
      # master='',
      config=sess_conf,
      checkpoint_dir=ckpt_dir
  )
  # print('end')
  # return

  # ------------------------------  start  ------------------------------
  with tf.train.MonitoredSession(
    session_creator= sess_creator,
    hooks= all_hooks,
    stop_grace_period_secs= 3600
    ) as mon_sess:
    while not mon_sess.should_stop():
      step = mon_sess.run(tf.train.get_global_step()) # arg from retval of _EvalHook before_run()
      # training, step = mon_sess.run([model.training, tf.train.get_global_step()]) # arg from retval of _EvalHook before_run()
      # if not training:
        # print('step {}: eval xxxxxxxxx'.format(step))
      # print(lr)
  return
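
runhooks.RunHook and runhooks.CkptSaverHook are project-specific and not shown; a minimal sketch of a tf.train.SessionRunHook doing roughly what such a hook needs (class name and behaviour are assumptions) is:

import tensorflow as tf

class HandleFeedHook(tf.train.SessionRunHook):
  """Hypothetical hook: feeds the dataset handle so the shared iterator reads training data."""

  def __init__(self, train_iterator, holder_handle, train_op):
    self._train_iterator = train_iterator
    self._holder_handle = holder_handle
    self._train_op = train_op
    self._handle = None

  def after_create_session(self, session, coord):
    # Resolve the iterator's string handle once the session exists.
    self._handle = session.run(self._train_iterator.string_handle())

  def before_run(self, run_context):
    # Run the training op with the iterator pointed at the training dataset.
    return tf.train.SessionRunArgs(
        fetches=self._train_op,
        feed_dict={self._holder_handle: self._handle})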
Example #7
torch.manual_seed(seed)
if use_cuda:
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    torch.cuda.manual_seed(seed)

model = Darknet(cfgfile)
model.print_network()

init_width = model.width
init_height = model.height

kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
    dataset.MyDataset(trainlist,
                      shape=(init_width, init_height),
                      shuffle=False,
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                      ]),
                      train=False),
    batch_size=batch_size,
    shuffle=False,
    **kwargs)

if use_cuda:
    model = torch.nn.DataParallel(model).cuda()

model.module.load_weights(weightfile)


def test():
    def truths_length(truths):
        for i in range(50):
            if truths[i][1] == 0:
                return i
Example #8
    loss_obj_cls = cls_loss_fn(output_obj[:, 5:],
                               torch.argmax(target_obj[:, 5:], dim=1))
    loss_obj = loss_obj_conf + loss_obj_crood + loss_obj_cls

    mask_noobj = target[..., 0] == 0
    output_noobj = output[mask_noobj]
    target_noobj = target[mask_noobj]
    loss_noobj = conf_loss_fn(output_noobj[:, 0], target_noobj[:, 0])
    loss = alpha * loss_obj + (1 - alpha) * loss_noobj

    return loss


if __name__ == '__main__':
    save_path = "models/net_yolo6.pth"
    train_label_path = r"data/train_label.txt"
    train_data = dataset.MyDataset(train_label_path)
    train_loader = DataLoader(train_data, batch_size=4, shuffle=True)
    # validate_label_path = r"data/validate_label.txt"
    # validate_data = dataset.MyDataset(validate_label_path)
    # validate_loader = DataLoader(validate_data, batch_size=2, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = MainNet().to(device)

    if os.path.exists(save_path):
        net.load_state_dict(torch.load(save_path))
    else:
        print("NO Param")

    net.train()
    opt = torch.optim.Adam(net.parameters())
Example #9
torch.manual_seed(seed)
if use_cuda:
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    torch.cuda.manual_seed(seed)

model       = Darknet(cfgfile)
model.print_network()

init_width        = model.width
init_height       = model.height

kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
    dataset.MyDataset(trainlist, labelpath, shape=(init_width, init_height),
                   shuffle=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ]), train=False),
    batch_size=batch_size, shuffle=False, **kwargs)

if use_cuda:
    model = torch.nn.DataParallel(model).cuda()

def test():
    def truths_length(truths):
        for i in range(50):
            if truths[i][1] == 0:
                return i

    model.eval()
    print('model---------',model)
Example #10
    loss = alpha * loss_obj + (
        1 - alpha) * loss_noobj  # combined loss; negatives dominate, so alpha balances positive vs. negative terms

    return loss  # return the total loss



save_path = r"model\YOLO_net.pth"  #模型路径
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

corr_loss = nn.CrossEntropyLoss()
cls_loss = nn.BCELoss()

# writer = SummaryWriter()
if __name__ == '__main__':

    myDataset = dataset.MyDataset()  # load the dataset
    train_loader = torch.utils.data.DataLoader(myDataset,
                                               batch_size=10,
                                               shuffle=True)

    net = MainNet()  # instantiate the network
    if os.path.exists(save_path):
        net.load_state_dict(torch.load(save_path))  # load saved weights

    net.train().to(device)

    opt = torch.optim.SGD(net.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=0.0005)  # optimizer
    scheduler = lr_scheduler.StepLR(opt, 10, gamma=0.8)  # learning-rate decay schedule
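
The epoch loop is cut off after the scheduler is created; a sketch of a typical continuation, assuming the loader yields targets and images in the same order as Example #5 and an illustrative epoch count, would be:

    for epoch in range(100):  # illustrative epoch count
        for target_13, target_26, target_52, img_data in train_loader:  # assumed loader output
            img_data = img_data.to(device)
            output_13, output_26, output_52 = net(img_data)
            loss = (loss_fn(output_13, target_13.to(device), 0.9)
                    + loss_fn(output_26, target_26.to(device), 0.9)
                    + loss_fn(output_52, target_52.to(device), 0.9))
            opt.zero_grad()
            loss.backward()
            opt.step()
        scheduler.step()  # StepLR: decay the learning rate every 10 epochs
        torch.save(net.state_dict(), save_path)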
Example #11
def main():
    # Load parameters
    args = params.args()
    
    # Load train, valid, and test data
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Loading dataset')
    train_dataset = dt.MyDataset(args.train_filename, args.mode)
    valid_dataset = dt.MyDataset(args.valid_filename, args.mode)
    test_dataset = dt.MyDataset(args.test_filename, args.mode)
    gold_dataset = dt.MyDataset(args.gold_filename, args.mode)
    print('train, valid, test num:', len(train_dataset), len(valid_dataset), len(test_dataset))
    
    # Load dataset to DataLoader
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.BATCH_SIZE, shuffle=True)
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=args.BATCH_SIZE, shuffle=False)
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.BATCH_SIZE, shuffle=False)
    gold_loader = DataLoader(dataset=gold_dataset, batch_size=args.BATCH_SIZE, shuffle=False)
    
    # Initialize model
    model = siamese.SiameseNetwork(args)
    model.to(device)
    
    # Train model
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Start training')
    try:
        train(model, train_loader, valid_loader, args)
    except KeyboardInterrupt:
        print('\n' + '-' * 89)
        print('Exit from training early')
        
    # Save final model
    save(model, args.save_dir, args.model_filename, -1)
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Training finished')
    
    
    # Test model
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Start prediction')
    predict = test(test_loader, model, args)
        
    pred_filename = args.predict_dir + '/predict_result.tsv'
    with open(pred_filename, 'w') as f:
        for item in predict:
            f.write(item[0] + '\t' + item[1] + '\t' + str(item[2]) + '\t' + str(item[3]) + '\n')
    print('Successfully save prediction result to', pred_filename)
    
    with open(args.predict_dir + '/rel_embed_vector.tsv', 'w') as f:
        for item in predict:
            out1 = item[5].cpu().numpy().tolist()
            f.write('\t'.join(str(x) for x in out1))
            f.write('\n')
    
    with open(args.predict_dir + '/rel_embed_label.tsv', 'w') as f:
        for item in predict:
            f.write(item[1])
            f.write('\n')
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Prediction finished')
    
    
    # Gold Prediction
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Start gold prediction')
    predict = test(gold_loader, model, args)
        
    pred_filename = args.gold_dir + '/predict_result.tsv'
    with open(pred_filename, 'w') as f:
        for item in predict:
            f.write(item[0] + '\t' + item[1] + '\t' + str(item[2]) + '\t' + str(item[3]) + '\n')
    print('Successfully save prediction result to', pred_filename)
    
    with open(args.gold_dir + '/rel_embed_vector.tsv', 'w') as f:
        for item in predict:
            out1 = item[5].cpu().numpy().tolist()
            f.write('\t'.join(str(x) for x in out1))
            f.write('\n')
    
    with open(args.gold_dir + '/rel_embed_label.tsv', 'w') as f:
        for item in predict:
            f.write(item[1])
            f.write('\n')
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Gold prediction finished')
Example #12
            ls += loss.data

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        ls /= sum
        mse /= sum
        loss_list.append(ls)
        print(f'[{epoch}/{epochs}] Loss: {ls:.5f} | mse: {mse: .5f}')
        if epoch % every_epochs_save == 0:
            model_file_path = f'{model_save_dir}/{machine_type}/model_{machine_type}_{epoch}.pkl'
            torch.save(ae_net, model_file_path)


if __name__ == '__main__':
    os.makedirs(param['model_dir'], exist_ok=True)
    dirs = utils.select_dirs(param)
    process_mt_list = ['pump', 'a_fan', 'slider', 'valve']
    for target_dir in dirs:
        machine_type = os.path.split(target_dir)[-1]
        if machine_type not in process_mt_list:
            continue
        pre_data_path = param['pre_data_dir'] + f'/{machine_type}.db'
        print(f'loading dataset [{machine_type}] ......')
        my_dataset = dataset.MyDataset(pre_data_path, keys=['log_mel'])
        train_loader = DataLoader(my_dataset,
                                  batch_size=param['batch_size'],
                                  shuffle=True)
        print('training ......')
        train(train_loader, machine_type, model_name='AE')
Example #13
def main():

  train_dir = r'\train'
  eval_dir = r'\eval'
  log_dir = r'\session_log'
  ckpt_dir = path.join(log_dir, 'ckpts')
  # model_dir = r'\monitored_sess_log'
  
  eval_interval = 20
  
  save_summary_steps = 5
  save_ckpts_steps = 20
  train_batchsz = 20
  eval_batchsz = 50
  eval_times = 10
  # eval_steps = 40
  epoch = 900
  img_num = 870 * 2
  max_steps = (img_num * epoch) // train_batchsz


  # ------------------------------ prepare input ------------------------------
  dset = dataset.MyDataset(train_dir, eval_dir)
  prefetch_batch = None
  iter_dict = {
    'train': dset.train(train_batchsz, prefetch_batch),
    'eval': dset.eval(eval_batchsz, prefetch_batch)
  }

  # train_iter = dset.train(train_batchsz, prefetch_batch)
  # eval_iter = dset.eval(eval_batchsz, prefetch_batch)
  # dict_tsr_handle = {
    # 'train': train_iter.string_handle(),
    # 'eval': eval_iter.string_handle()
  # }

  holder_handle = tf.placeholder(tf.string, [])
  iter = tf.data.Iterator.from_string_handle(
      holder_handle, dset.output_types)
  # next_elem = iter.get_next()
  inputx, labels, filename = iter.get_next()
  inputx = tf.reshape(inputx, [-1, 200, 250, 3])
  # eval_x.set_shape([eval_batchsz, 200, 250, 3])

  # train_x, train_y, train_fname = dset.train(train_batchsz, prefetch_batch)
  # train_x.set_shape([train_batchsz, 200, 250, 3])
  # eval_x, eval_y, eval_fname = dset.eval(eval_batchsz, prefetch_batch)
  # eval_x.set_shape([eval_batchsz, 200, 250, 3])

  # \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ build graph \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
  model = cnn.CNN(data_format = 'NHWC')
  logits = model(inputx)
  with tf.name_scope('cross_entropy'):
    loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
    
  with tf.name_scope('accuracy'):
    _pred = tf.argmax(logits, axis=1, output_type=tf.int32)
    acc_vec = tf.equal(labels, _pred)
    acc = tf.reduce_mean(tf.cast(acc_vec, tf.float32))
    # float
    
  with tf.name_scope('optimizer'):
    optimizer = tf.train.AdamOptimizer(1e-4)
    train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())
  

  # >>>  summary
  summary_conf = {
    'dir': log_dir,
    'saved_steps': save_summary_steps
  }
  summary_protobuf = {
    'loss': tf.summary.scalar('cross_entropy', loss),
    'accuracy': tf.summary.scalar('accuracy', acc)
  }
  # print(list(summary_protobuf.values()))
  # summary_loss = tf.summary.scalar('cross_entropy', loss)
  # summary_acc = tf.summary.scalar('accuracy', acc)
  # summary_lr = tf.summary.scalar('lr', optimizer._lr_t)
  # global_step = 
  merged_op = tf.summary.merge_all()
  # summary_hook = tf.train.SummarySaverHook(
  #   save_steps=1,
  #   output_dir= ckpt_dir,
  #   summary_op= merged_op
  # )
  
  # >>>  eval

  op_init1 = tf.variables_initializer(tf.global_variables())
  op_init2 = tf.variables_initializer(tf.local_variables())
  op_group = tf.group(op_init1, op_init2)

  # ////////////////////////////// session config //////////////////////////////
  sess_conf = tf.ConfigProto()
  sess_conf.gpu_options.allow_growth = True
  sess_conf.gpu_options.per_process_gpu_memory_fraction = 0.75
  
  train_writer = tf.summary.FileWriter(path.join(summary_conf['dir'], 'train'))
  eval_writer = tf.summary.FileWriter(path.join(summary_conf['dir'], 'eval'))

  summary_timer = tf.train.SecondOrStepTimer(every_steps=save_summary_steps)
  eval_timer = tf.train.SecondOrStepTimer(every_steps=eval_interval)
  with tf.Session(config= sess_conf) as sess:
    train_handle = sess.run(iter_dict['train'].string_handle())
    eval_handle = sess.run(iter_dict['eval'].string_handle())

    # Fetch the features together with the corresponding batch of filenames.
    feature2rnn, fnames = sess.run([model.feature2rnn, filename], feed_dict= {
          holder_handle: eval_handle,
          model.is_training: False
        })
    np.save(fnames[0].decode() + '.npy', feature2rnn)

    # sess.run(op_group)
    # for _step in range(max_steps):
    #   if eval_timer.should_trigger_for_step(_step):
    #     for i in range(eval_times):
          
    #       pass

    #     summary_str = sess.run(merged_op, feed_dict= {
    #       holder_handle: eval_handle,
    #       model.is_training: False
    #     })
    #     eval_writer.add_summary(summary_str, _step)
    #     eval_timer.update_last_triggered_step(_step)
      
    #   summary_str, _ = sess.run([merged_op, train_op, acc], feed_dict={
    #     holder_handle: train_handle,
    #     model.is_training: True
    #   })
    #   if summary_timer.should_trigger_for_step(_step):
    #     train_writer.add_summary(summary_str, _step)
    #     summary_timer.update_last_triggered_step(_step)

  print('end')
  return
Example #14
    mask_zero = label[..., 0] == 0
    label_zero = label[mask_zero].float()
    out_zero = out[mask_zero].cpu()

    loss_zero_conf = loss_fn1(out_zero[:, 0], label_zero[:, 0])

    loss = a * loss_nonzero + (1 - a) * loss_zero_conf

    return loss


if __name__ == '__main__':
    net = MainNet()
    if torch.cuda.is_available():
        net = net.cuda()
    mydata = dataset.MyDataset()
    label_data = data.DataLoader(mydata,
                                 batch_size=2,
                                 shuffle=True,
                                 num_workers=6)

    # net.load_state_dict(torch.load(os.path.join(save_path,save_param)))
    optimizer = torch.optim.Adam((net.parameters()))
    for epoch in range(20000):
        for label_13, label_26, label_52, img_data in label_data:
            # print('label_13',label_13[:,:,0,0])
            if torch.cuda.is_available():
                img_data = img_data.cuda()
            out_13, out_26, out_52 = net(img_data)
            # print('out',out_13[:,:,0,0])
            loss_fn1 = nn.BCEWithLogitsLoss()
Example #15
def main():
    transform = transforms.Compose([
        transforms.Resize((448, 448)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    grid_num = 7 if args.command == "basic" else 14

    trainset = dataset.MyDataset(root="hw2_train_val/train15000",
                                 grid_num=grid_num,
                                 train=args.augment,
                                 transform=transform)

    testset = dataset.MyDataset(grid_num=grid_num,
                                root="hw2_train_val/val1500",
                                train=False,
                                transform=transform)

    trainLoader = DataLoader(trainset,
                             batch_size=args.batchs,
                             shuffle=True,
                             num_workers=args.worker)
    testLoader = DataLoader(testset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=args.worker)
    device = utils.selectDevice(show=True)

    if args.command == "basic":
        model = models.Yolov1_vgg16bn(pretrained=True).to(device)
        criterion = models.YoloLoss(7., 2., 5., 0.5, device).to(device)
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=1e-4)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 45, 55],
                                                   gamma=0.1)
        start_epoch = 0

        if args.load:
            model, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
                args.load, model, optimizer, scheduler)

        model = train(model,
                      criterion,
                      optimizer,
                      scheduler,
                      trainLoader,
                      testLoader,
                      start_epoch,
                      args.epochs,
                      device,
                      lr=args.lr,
                      grid_num=7)

    elif args.command == "improve":
        model_improve = models.Yolov1_vgg16bn_Improve(
            pretrained=True).to(device)
        criterion = models.YoloLoss(14., 2., 5, 0.5, device).to(device)
        optimizer = optim.SGD(model_improve.parameters(),
                              lr=args.lr,
                              weight_decay=1e-4)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 40, 70],
                                                   gamma=0.1)
        start_epoch = 0

        if args.load:
            model_improve, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
                args.load, model_improve, optimizer, scheduler)

        model_improve = train(model_improve,
                              criterion,
                              optimizer,
                              scheduler,
                              trainLoader,
                              testLoader,
                              start_epoch,
                              args.epochs,
                              device,
                              lr=args.lr,
                              grid_num=7,
                              save_name="Yolov1-Improve")