Example #1
File: __init__.py Project: ramonski/pcmd
import argparse


def main():
    parser = argparse.ArgumentParser(description='Runs a shell command in the parent shell')

    parser.add_argument('cmd',
                        metavar='COMMAND',
                        type=str,
                        help='Command to execute')

    parser.add_argument('section',
                        metavar='SECTION',
                        type=str,
                        help='Section where the Command is executed')

    args = parser.parse_args()

    pcmd = get_parser()

    if not is_config_there():
        msg = "No Config found in %s\n" % get_config()
        parser.exit(status=1, message=msg)

    if not pcmd.has_section(args.section):
        msg = "Section %s not in config\n" % args.section
        parser.exit(status=1, message=msg)

    if not pcmd.has_option(args.section, args.cmd):
        msg = "Command %s not in Section %s\n" % (args.cmd,  args.section)
        parser.exit(status=1, message=msg)

    # run the shell command in the parent terminal
    do_script(pcmd.get(args.section, args.cmd))
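The snippet above relies on four helpers defined elsewhere in the pcmd module (get_parser, is_config_there, get_config, do_script). A minimal sketch of what they might look like, assuming an INI-style config read with configparser; the config path and the os.system stand-in are assumptions, not pcmd's actual implementation:

import configparser
import os

CONFIG_FILE = os.path.expanduser("~/.pcmd.cfg")  # hypothetical location


def get_config():
    return CONFIG_FILE


def is_config_there():
    return os.path.isfile(CONFIG_FILE)


def get_parser():
    # each config section maps command names to shell command strings
    parser = configparser.ConfigParser()
    parser.read(CONFIG_FILE)
    return parser


def do_script(cmd):
    # stand-in only: pcmd's real do_script runs the command in the parent shell
    os.system(cmd)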
Example #2
import math  # used by math.ceil below

import optimizer_util
import config as configs
from util import Snapshot, Summary, InitNodes, Metric
from job_function_util import get_train_config, get_val_config
import resnet_model
import resnext_model
import vgg_model
import alexnet_model
import inception_model
import mobilenet_v2_model

parser = configs.get_parser()
args = parser.parse_args()
configs.print_args(args)

total_device_num = args.num_nodes * args.gpu_num_per_node
train_batch_size = total_device_num * args.batch_size_per_device
val_batch_size = total_device_num * args.val_batch_size_per_device
(C, H, W) = args.image_shape
epoch_size = math.ceil(args.num_examples / train_batch_size)
num_val_steps = int(args.num_val_examples / val_batch_size)
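# For example (hypothetical values): num_nodes=2, gpu_num_per_node=4 and
# batch_size_per_device=32 give total_device_num = 8 and train_batch_size = 256;
# with num_examples = 1281167 (ImageNet-1k), epoch_size = ceil(1281167 / 256) = 5005.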

model_dict = {
    "resnet50": resnet_model.resnet50,
    "vgg": vgg_model.vgg16bn,
    "alexnet": alexnet_model.alexnet,
    "inceptionv3": inception_model.inceptionv3,
Example #3
    def build_parser(self):
        parser = get_parser("Augment config")
        parser.add_argument('--name', default='')
        parser.add_argument(
            '--dataset',
            default='ImageNet',
            help='imagenet / ImageNet56 / ImageNet112 / cifar10')
        parser.add_argument('--data_path',
                            default='/gdata/ImageNet2012',
                            help='data path')
        parser.add_argument('--data_loader_type',
                            default='torch',
                            help='torch/dali')
        parser.add_argument('--grad_clip',
                            type=float,
                            default=0,
                            help='gradient clipping for weights')
        parser.add_argument(
            '--model_method',
            default='my_model_collection',
        )
        parser.add_argument(
            '--model_name',
            default='my_model_collection',
        )
        parser.add_argument('--model_init',
                            type=str,
                            default='he_fout',
                            choices=['he_fin', 'he_fout'])

        parser.add_argument('--batch_size',
                            type=int,
                            default=256,
                            help='batch size')
        # parser.add_argument('--lr', type=float, default=0.05, help='lr for weights')
        # parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
        # parser.add_argument('--weight_decay', type=float, default=4e-5, help='weight decay')
        parser.add_argument('--label_smoothing', type=float, default=0.1)
        parser.add_argument('--no_decay_keys',
                            type=str,
                            default='bn',
                            choices=['None', 'bn', 'bn#bias'])

        parser.add_argument('--print_freq',
                            type=int,
                            default=1,
                            help='print frequency')
        parser.add_argument('--gpus',
                            default='0',
                            help='gpu device ids separated by comma. '
                            '`all` indicates use all gpus.')
        # parser.add_argument('--epochs', type=int, default=150, help='# of training epochs')
        parser.add_argument('--init_channels', type=int, default=36)
        parser.add_argument('--layers',
                            type=int,
                            default=20,
                            help='# of layers')
        parser.add_argument('--seed', type=int, default=2, help='random seed')

        parser.add_argument('--workers',
                            type=int,
                            default=4,
                            help='# of workers')
        parser.add_argument('--aux_weight',
                            type=float,
                            default=0,
                            help='auxiliary loss weight')
        parser.add_argument('--cutout_length',
                            type=int,
                            default=0,
                            help='cutout length')
        parser.add_argument('--auto_augmentation',
                            action='store_true',
                            default=False,
                            help='using autoaugmentation')

        parser.add_argument('--bn_momentum', type=float, default=0.1)
        parser.add_argument('--bn_eps', type=float, default=1e-3)
        parser.add_argument('--sync_bn',
                            action='store_true',
                            default=False,
                            help='using sync_bn model')
        parser.add_argument('--dropout_rate', type=float, default=0)
        # parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path prob')
        parser.add_argument('--drop_path_prob',
                            type=float,
                            default=0,
                            help='drop path prob')

        parser.add_argument('--genotype', default='', help='Cell genotype')
        parser.add_argument('--structure_path',
                            default=None,
                            type=str,
                            help='Config path')
        parser.add_argument('--deterministic',
                            action='store_true',
                            default=False,
                            help='using deterministic model')

        # NB: type is str but the default is the bool False, so the value stays
        # False unless a checkpoint path is passed on the command line
        parser.add_argument('--pretrained',
                            type=str,
                            default=False,
                            help='load pretrained module')

        return parser
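A hedged usage sketch for the parser built above; the owning class is not shown in the listing, so AugmentConfig and the sample arguments below are assumptions:

cfg = AugmentConfig()  # assumed to be the class that defines build_parser
parser = cfg.build_parser()
# parse a hypothetical command line instead of sys.argv
args = parser.parse_args(['--dataset', 'cifar10', '--batch_size', '128', '--gpus', '0,1'])
print(args.dataset, args.batch_size, args.gpus)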
Example #4
            rho_best = rho
            epoch_best = epoch
            log_and_print(base_logger, '-----New best found!-----')
            if args.save:
                torch.save(
                    {
                        'epoch': epoch,
                        'i3d': i3d.state_dict(),
                        'evaluator': evaluator.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'rho_best': rho_best
                    }, f'ckpts/{args.type}.pt')


if __name__ == '__main__':

    args = get_parser().parse_args()

    if not os.path.exists('./exp'):
        os.mkdir('./exp')
    if not os.path.exists('./ckpts'):
        os.mkdir('./ckpts')

    init_seed(args)

    base_logger = get_logger(f'exp/{args.type}.log', args.log_info)
    i3d, evaluator = get_models(args)
    dataloaders = get_dataloaders(args)

    main(dataloaders, i3d, evaluator, base_logger, args)
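Restoring the checkpoint written above is symmetric: the keys match the dict passed to torch.save. A sketch, assuming i3d, evaluator, optimizer and args are in scope:

import torch

ckpt = torch.load(f'ckpts/{args.type}.pt', map_location='cpu')
i3d.load_state_dict(ckpt['i3d'])
evaluator.load_state_dict(ckpt['evaluator'])
optimizer.load_state_dict(ckpt['optimizer'])
start_epoch, rho_best = ckpt['epoch'] + 1, ckpt['rho_best']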
Example #5
                if e.config.summarize:
                    writer.add_scalar(
                        "dev/best_result", best_dev_res, it)
                    for n, v in test_perf.items():
                        writer.add_scalar(
                            "test/" + n, v, it)
            e.log.info("best dev result: {:.4f}, "
                       "test result: {:.4f}, "
                       .format(best_dev_res, test_res))
            label_stats.reset()
            unlabel_stats.reset()


if __name__ == '__main__':

    args = config.get_parser().parse_args()
    args.use_cuda = torch.cuda.is_available()

    def exit_handler(*args):
        print(args)
        print("best dev result: {:.4f}, "
              "test result: {:.4f}"
              .format(best_dev_res, test_res))
        exit()

    train_helper.register_exit_handler(exit_handler)

    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    with train_helper.experiment(args, args.prefix) as e:
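train_helper.register_exit_handler, used above, is not shown in the listing; a common implementation hooks process signals so the best dev/test numbers are still printed when the job is killed (a sketch, not the project's actual code):

import signal

def register_exit_handler(handler):
    # invoke handler(signum, frame) on Ctrl-C or a polite kill
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)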
Example #6
def main():
    args = get_parser().parse_args()
    print(args)
                segmentation = Segmentation(segment)
                section_segmented = segmentation.execute()
                
                sections_segmented.append(section_segmented)
            
            self.save_solution(sections_segmented, original_image.image)
            elapsed_time = int(time.time() - start_time)

            minutes = elapsed_time % 3600 // 60
            seconds = elapsed_time % 60
            print("Finished extracting in {:02d}:{:02d}".format(minutes, 
                                                                seconds))
        print("Finished extracting.")

if __name__ == "__main__":
    parser = get_parser()
    no_parser = False
    
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse raises SystemExit when it cannot parse the command line
        args = sys.argv[1:]
        no_parser = True

    if no_parser:
        if args[0] != '-h':
            solution = Solution(args=args, no_parser=no_parser)
            solution.run()
    else:
        solution = Solution(args=args, no_parser=no_parser)
Example #8
def main():
    # Get the arguments; vars() turns the parsed Namespace into a dict
    option = vars(get_parser().parse_args())
    # Make sure the output directory exists, then dump the arguments to a JSON file
    if not os.path.isdir(option['log_file']):
        os.makedirs(option['log_file'])

    with open(os.path.join(option['log_file'],'opt.json'),'w') as f:
        json.dump(option,f)
        f.write('\n')
    # Path of the trace file
    trace_file=os.path.join(option['log_file'],'trace.txt')

    # Set the random seed
    torch.manual_seed(2018)
    if option['data_cuda']:
        torch.cuda.manual_seed(2018)


    # Step 1: load the dataset
    if option['run_mode']=='train_val':
        trainLoader,valLoader=init_dataset(option,mode=option['run_mode'])

    # Step 2: build the model
    model=MatchingNetwork(keep_prob=0.0,num_channles=1,fce=False,
                          image_size=28,batch_size=1,use_cuda=option['data_cuda'])
    if option['data_cuda']:
        model.cuda()
    # Step 3: loss function and optimizer
    optimizer=torch.optim.Adam(model.parameters(),lr=option['train_learningrate'])
    lr_schedule=torch.optim.lr_scheduler.StepLR(optimizer=optimizer,gamma=0.5,step_size=option['train_decay_every'])
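    # StepLR multiplies the learning rate by gamma=0.5 every `train_decay_every` epochs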

    # Training
    if os.path.isfile(trace_file):
        os.remove(trace_file)
    best_acc=0.0
    for epoch in range(option['train_epoches']):
        train_loss=[]
        train_acc=[]
        print("------epoch: %2d --------"%epoch)
        model.train()
        for data,label in tqdm(trainLoader):
            data=Variable(data)

            optimizer.zero_grad()
            if(option['data_cuda']):
                data=data.cuda()
            acc,loss=model(data,option['data_way'],option['data_shot'],option['data_query'])

            loss.backward()
            optimizer.step()
            train_loss.append(float(loss))
            train_acc.append(float(acc))

        avg_loss=np.mean(train_loss)
        avg_acc=np.mean(train_acc)
        lr_schedule.step()
        print("epoch %2d 训练结束 : avg_loss:%.4f , avg_acc:%.4f"%(epoch,avg_loss,avg_acc))

        # Validation phase

        val_acc=[]
        print("开始进行validation:")
        model.eval()
        for data,label in tqdm(valLoader):
            data=Variable(data)
            if option['data_cuda']:
                data=data.cuda()

            acc_val,_=model(data,option['data_test_way'],option['data_test_shot'],option['data_test_query'])
            val_acc.append(float(acc_val))

        avg_acc_val=np.mean(val_acc)
        print("validation结束 : avg_acc:%.4f"%avg_acc_val)
        if best_acc < avg_acc_val:
            print("New best model so far; saving...")
            name = model.save(option['log_file'])
            best_acc = avg_acc_val
            print("Saved successfully to:", name)
        with open(trace_file,'a') as f:
            f.write('epoch {:2d} finished: avg_loss: {:.4f}, avg_acc: {:.4f}, validation_acc: {:.4f}'.format(epoch, avg_loss, avg_acc, avg_acc_val))
            f.write('\n')

    print("训练结束,最佳模型的精度为:",best_acc)
Example #9
        images = torch.from_numpy(images).type(torch.FloatTensor).to(device)
        # Skip GPU-specific calls when running on the CPU
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)
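        # synchronizing makes the wall-clock timing below reflect completed GPU work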

        # model_time = time.time()
        outputs = model(images)
        outputs = [{k: v.to(cpu_device).numpy() for k, v in t.items()} for t in outputs]

        infos.append([outputs, targets, shapes])
        if draw:
            for x in range(len(outputs)):
                os.makedirs('result1', exist_ok=True)
                savename = os.path.join('result1', os.path.split(shapes[x][2])[-1])
                plot_boxes_cv2(images_src[x], outputs[x]['boxes'], savename=savename, class_names=None)
        torch.set_num_threads(n_threads)

    Map = make_labels_and_compute_map(infos, parser_data.input_dir, save_err_miss=parser_data.save_err_miss)
    return Map



if __name__ == "__main__":
    from config import get_parser
    args = get_parser()
    lines, num_train, num_val = get_train_lines(args.data_path)
    args.pth_path = './save_weights/Epoch_022_Loss_0.0736_lr_0.000059.pth'
    main(args, lines, draw=True)


Example #10
parser.add_argument('--eval',
                    type=ast.literal_eval,
                    default=True,
                    help='Evaluate the interpretation results or not')
parser.add_argument('--path',
                    type=str,
                    default="data/",
                    help='the input data path, can be a single csv '
                    'or a data folder')
parser.add_argument('--w2s_ratio',
                    type=str,
                    default='real_len',
                    help="'real_len', 'auto', 'pn', or a ratio.")
parser.add_argument('--runs', type=int, default=1)
parser.add_argument('--record_name', type=str, default='')
parser = get_parser(algorithm_name, parser)
args = parser.parse_args()

input_root_list = [root + args.path]
w2s_ratio = args.w2s_ratio
od_eval_model = [
    "iforest", "copod", "hbos"
]  # we obtain ground-truth annotations using three outlier detection methods
runs = args.runs
record_name = args.record_name

# ------------------- record ----------------- #
if not os.path.exists("record/" + algorithm_name):
    os.makedirs("record/" + algorithm_name)
if not os.path.exists("checkpoints"):
    os.makedirs("checkpoints/")
Example #11
smiles = []
for i in df.itertuples(index=False):
    # keep SMILES shorter than max_len (assumes the first column holds the SMILES string)
    if len(i[0]) < max_len:
        smiles.append(i[0])

vocab = OneHotVocab.from_data(smiles)
experiment = Experiment(project_name='pytorch')


train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                           num_workers=8 * 6,
                                           pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8 * 6,
                                          pin_memory=True)

model = VAE(vocab, get_parser())
optimizer = optim.Adam((p for p in model.vae.parameters() if p.requires_grad), lr=0.0003)


def train():
    return

def test():
    return

def sample():
    return


for epoch in range(1, epochs + 1):
Example #12
import sounddevice as sd
import numpy as np
import comm
import huffman
import rec
import config
import os

sd.default.samplerate = fs = 48000
sd.default.channels = 1
start_time = 0
end_time = 0
# args = get_args()
parser = config.get_parser()
args = parser.parse_args()
if __name__ == '__main__':
    print("Hi~ Welcome to project check script")
    if args.send:
        huffman.getHuffFile()
        print('You are sending signal...')
        while True:
            part = input("Press any key to send\n")
            # if part == 'e':
            # 	exit
            comm.play_file()
            print('Done\n')

    else:
        print('You are receiving signal...')
        if os.path.exists('record.wav'):
            os.remove('record.wav')
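The receive branch is truncated in the listing. With sounddevice, a minimal recording step might look like the sketch below; the duration and the use of the soundfile package are assumptions, not the project's rec module:

import soundfile as sf

duration = 5  # seconds, hypothetical
recording = sd.rec(int(duration * fs), samplerate=fs, channels=1)
sd.wait()  # block until the recording finishes
sf.write('record.wav', recording, fs)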
Example #13
def get_config():
    parser = config.get_parser()

    # pretrain bert config
    parser.add_argument(
        "--ofrecord_path",
        type=str,
        default="/dataset/bert/of_wiki_seq_len_128",
        help="Path to ofrecord dataset",
    )
    parser.add_argument(
        "--train-dataset-size",
        type=int,
        default=10000000,
        help="dataset size of ofrecord",
    )
    parser.add_argument(
        "--train-data-part", type=int, default=64, help="data part num of ofrecord"
    )
    parser.add_argument(
        "--train-batch-size", type=int, default=8, help="Training batch size"
    )
    parser.add_argument(
        "--val-batch-size", type=int, default=32, help="Validation batch size"
    )
    parser.add_argument(
        "--train-global-batch-size",
        type=int,
        default=None,
        dest="train_global_batch_size",
        help="train batch size",
    )
    parser.add_argument(
        "--val-global-batch-size",
        type=int,
        default=None,
        dest="val_global_batch_size",
        help="val batch size",
    )

    parser.add_argument("-e", "--epochs", type=int, default=1, help="Number of epochs")

    # NB: argparse's type=bool calls bool() on the raw string, so any non-empty
    # value (including "false") parses as True
    parser.add_argument(
        "--with-cuda",
        type=bool,
        default=True,
        help="Training with CUDA: true, or false",
    )
    parser.add_argument(
        "--cuda_devices", type=int, nargs="+", default=None, help="CUDA device ids"
    )
    parser.add_argument(
        "--optim_name", type=str, default="adamw", help="optimizer name"
    )
    parser.add_argument("--lr", type=float, default=1e-3, help="Learning rate of adam")
    parser.add_argument(
        "--weight_decay", type=float, default=0.01, help="Weight_decay of adam"
    )
    parser.add_argument(
        "--loss_print_every_n_iters",
        type=int,
        default=20,
        help="Interval of training loss printing",
    )
    parser.add_argument(
        "--val_print_every_n_iters",
        type=int,
        default=20,
        help="Interval of evaluation printing",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default="checkpoints",
        help="Path to model saving",
    )
    parser.add_argument(
        "--grad-acc-steps", type=int, default=1, help="Steps for gradient accumulation"
    )
    parser.add_argument(
        "--nccl-fusion-threshold-mb",
        type=int,
        default=16,
        dest="nccl_fusion_threshold_mb",
        help="NCCL fusion threshold in megabytes; set to 0 to stay compatible with previous versions of OneFlow.",
    )
    parser.add_argument(
        "--nccl-fusion-max-ops",
        type=int,
        default=24,
        dest="nccl_fusion_max_ops",
        help="Maximum number of ops fused by NCCL; set to 0 to stay compatible with previous versions of OneFlow.",
    )
    parser.add_argument(
        "--use_ddp",
        type=str2bool,
        nargs="?",
        const=True,
        help="Whether to use DDP",
    )
    parser.add_argument(
        "--use_consistent",
        type=str2bool,
        nargs="?",
        const=True,
        help="Whether to use consistent mode",
    )
    parser.add_argument(
        "--metric-local",
        type=str2bool,
        default=False,
        nargs="?",
        const=True,
        dest="metric_local",
    )

    args = parser.parse_args()
    return args
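The snippet uses str2bool as an argparse type without defining it; a common definition (an assumption, not necessarily this project's) is:

import argparse

def str2bool(v):
    # accept the usual spellings of true/false on the command line
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected")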