Example #1
        batch_size_pseudo=args.batch_size_pseudo,
        state=0,
        split=args.train_set,
        input_sizes=input_sizes,
        sets_id=args.sets_id,
        mean=mean,
        std=std,
        keep_scale=keep_scale,
        reverse_channels=reverse_channels)
    after_loading()
    net, optimizer = accelerator.prepare(net, optimizer)
    time_now = time.time()
    # state=0: generate class-balanced pseudo labels for the unlabeled split
    ratio = generate_class_balanced_pseudo_labels(
        net=net,
        device=device,
        loader=unlabeled_loader,
        input_size=input_sizes[2],
        label_ratio=args.label_ratio,
        num_classes=num_classes,
        is_mixed_precision=args.mixed_precision)
    print(ratio)
    print('Pseudo labeling time: %.2fs' % (time.time() - time_now))
else:
    # state=1: build loaders for training on labeled + pseudo-labeled data
    labeled_loader, pseudo_labeled_loader, val_loader = init(
        valtiny=args.valtiny,
        no_aug=args.no_aug,
        data_set=args.dataset,
        batch_size_labeled=args.batch_size_labeled,
        batch_size_pseudo=args.batch_size_pseudo,
        state=1,
        split=args.train_set,
        input_sizes=input_sizes,
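
Example #1 is cut off just above the opening of the init(...) call, so the sketch below reassembles that state=0 branch into a single self-contained helper. It uses only keyword arguments that appear in the snippets; the assignment target unlabeled_loader is inferred from the loader=unlabeled_loader argument further down, the leading init() arguments (valtiny, no_aug, data_set, batch_size_labeled) are copied from the parallel state=1 call, and the wrapper name label_unlabeled_split as well as the import path are hypothetical.

# Minimal sketch of the state=0 (pseudo-labeling) branch shown in Example #1.
import time

# Hypothetical import path; init(), after_loading() and
# generate_class_balanced_pseudo_labels() live in the project's own modules.
from utils.all_utils import (init, after_loading,
                             generate_class_balanced_pseudo_labels)


def label_unlabeled_split(args, net, optimizer, accelerator, device,
                          input_sizes, num_classes, mean, std,
                          keep_scale, reverse_channels):
    # state=0 asks init() for a loader over the unlabeled split only
    unlabeled_loader = init(
        valtiny=args.valtiny,
        no_aug=args.no_aug,
        data_set=args.dataset,
        batch_size_labeled=args.batch_size_labeled,
        batch_size_pseudo=args.batch_size_pseudo,
        state=0,
        split=args.train_set,
        input_sizes=input_sizes,
        sets_id=args.sets_id,
        mean=mean,
        std=std,
        keep_scale=keep_scale,
        reverse_channels=reverse_channels)
    after_loading()
    # let the Accelerator wrap the model and optimizer for the target device
    net, optimizer = accelerator.prepare(net, optimizer)
    time_now = time.time()
    ratio = generate_class_balanced_pseudo_labels(
        net=net,
        device=device,
        loader=unlabeled_loader,
        input_size=input_sizes[2],
        label_ratio=args.label_ratio,
        num_classes=num_classes,
        is_mixed_precision=args.mixed_precision)
    print(ratio)
    print('Pseudo labeling time: %.2fs' % (time.time() - time_now))
    return ratio

Example #2 below shows the same flow without the accelerator.prepare() step or the is_mixed_precision flag, and with input_sizes[0] rather than input_sizes[2] as the pseudo-labeling resolution.
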
Example #2
        batch_size_labeled=args.batch_size_labeled,
        batch_size_pseudo=args.batch_size_pseudo,
        state=0,
        split=args.train_set,
        input_sizes=input_sizes,
        sets_id=args.sets_id,
        mean=mean,
        std=std,
        keep_scale=keep_scale,
        reverse_channels=reverse_channels)
    after_loading()
    time_now = time.time()
    # state=0: generate class-balanced pseudo labels for the unlabeled split
    generate_class_balanced_pseudo_labels(
        net=net,
        device=device,
        loader=unlabeled_loader,
        input_size=input_sizes[0],
        label_ratio=args.label_ratio,
        num_classes=num_classes)
    print('Pseudo labeling time: %.2fs' % (time.time() - time_now))
else:
    # state=1: build loaders for training on labeled + pseudo-labeled data
    labeled_loader, pseudo_labeled_loader, val_loader = init(
        valtiny=args.valtiny,
        no_aug=args.no_aug,
        data_set=args.dataset,
        batch_size_labeled=args.batch_size_labeled,
        batch_size_pseudo=args.batch_size_pseudo,
        state=1,
        split=args.train_set,
        input_sizes=input_sizes,
        sets_id=args.sets_id,
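
Both examples pull their configuration from an argparse namespace. The sketch below declares every args.* field the two snippets read (valtiny, no_aug, dataset, batch_size_labeled, batch_size_pseudo, train_set, sets_id, label_ratio, mixed_precision); the flag spellings, types, and defaults are illustrative guesses, not the project's real interface.

# Hypothetical argument parser covering the args.* fields referenced above;
# names, types, and defaults are placeholders for illustration only.
import argparse


def build_parser():
    parser = argparse.ArgumentParser(description='class-balanced pseudo labeling')
    parser.add_argument('--dataset', type=str, default='voc')          # args.dataset
    parser.add_argument('--train-set', type=str, default='1')          # args.train_set
    parser.add_argument('--sets-id', type=int, default=0)              # args.sets_id
    parser.add_argument('--batch-size-labeled', type=int, default=8)   # args.batch_size_labeled
    parser.add_argument('--batch-size-pseudo', type=int, default=8)    # args.batch_size_pseudo
    parser.add_argument('--label-ratio', type=float, default=0.2)      # args.label_ratio
    parser.add_argument('--valtiny', action='store_true')              # args.valtiny
    parser.add_argument('--no-aug', action='store_true')               # args.no_aug
    parser.add_argument('--mixed-precision', action='store_true')      # args.mixed_precision
    return parser


if __name__ == '__main__':
    args = build_parser().parse_args()
    print(vars(args))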