Example no. 1
    def __init__(self):
        # a module-level logger is assumed to be configured elsewhere
        print("Starting account creation and buildup")
        self.step = 0
        from misc import Misc
        from core.base import base
        base = base()
        if Misc.confirm(prompt="Are you sure you want to create an account infrastructure?", resp=False):
            self.account = base.get_account_information()
            if 'profile_name' in self.account['cli_arguments']:
                print("AWS account has been provided")
            else:
                logger.error("AWS account not provided")
                exit(1)
            if 'region_name' in self.account['cli_arguments']:
                print("AWS region has been provided")
            else:
                logger.error("AWS region not provided")
                exit(1)
            # note: the original `is not (None or "")` only compared against ""; check both values
            if self.account.get('session') not in (None, ""):
                logger.info("Session object created successfully")
            else:
                logger.error("AWS session not created successfully")
                exit(1)
            self.run_workflow()
        else:
            print("You are not prepared - Illidan")
Example no. 2
    def __init__(self):
        # argparse and sys are used below and assumed to be imported at module level
        from core.base import base
        base = base()
        self.global_options = '''
Global Options:
  -v, --verbose                 increase client verbosity level (max 5)
  -q, --quiet                   decrease client verbosity level (min 0)
                                (default verbosity level: 3)
  --aws_account                     which AWS account should be used
  --aws_region                      which AWS region should be used for the endpoint
  --aws_secret_key                  the AWS secret key
  --aws_access_key                  the AWS access key

Global Output options:
  --table                       print output as a PrettyTable
  --csv                         print output in CSV format (delimiter ';')

    '''
        parser = argparse.ArgumentParser(description='EC2 tool for DevOps', usage='''ec2.py <command> [<args>]

Available first-level commands:
  ec2                         ec2 instance related subcommands
  elb                         elb related subcommands
  route53                     route53 related subcommands
  sg                          securitygroup related subcommands
  ami                         ami related subcommands
  iam                         iam related subcommands
  vpc                         vpc related subcommands
  rds                         rds related subcommands
  autoscale                   autoscale related subcommands
  s3                          s3 related subcommands
  cloudformation              cloudformation related subcommands
  stack                       stack related subcommands
  kinesis                     kinesis related subcommands
  apigateway                  apigateway related subcommands
    ''' + self.global_options)
        parser.add_argument('command', help='Endpoint to use')
        args = parser.parse_args(sys.argv[1:2])
        self.account = base.get_account_information()
        if not hasattr(self, args.command):
            logger.error('Unrecognized command')
            parser.print_help()
            exit(1)
        getattr(self, args.command)()
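This is the git-style dispatch idiom: parse_args(sys.argv[1:2]) consumes only the command name, and getattr hands control to the method of the same name, which parses the rest of argv itself. A sketch of one such subcommand method (the body is illustrative, not taken from the tool):

    def ec2(self):
        # each subcommand owns its own parser and consumes the remaining argv
        parser = argparse.ArgumentParser(
            description='ec2 instance related subcommands' + self.global_options)
        parser.add_argument('action', help='e.g. list, start, stop')
        args = parser.parse_args(sys.argv[2:])
        # ... dispatch to the actual EC2 logic here

Invoked as, for example, `python ec2.py ec2 list`; any global flags would need to be declared on the subcommand parser as well.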
Example no. 3
def main(config):

	# environments
	make_dirs(config.save_path)
	make_dirs(os.path.join(config.save_path, 'logs/'))
	make_dirs(os.path.join(config.save_path, 'model/'))
	make_dirs(os.path.join(config.save_path, 'features/'))
	make_dirs(os.path.join(config.save_path, 'results/'))
	make_dirs(os.path.join(config.save_path, 'images/'))
	os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'


	# loaders
	transform = torchvision.transforms.Compose([XRayCenterCrop(),
												XRayResizer(224),
												ToPILImage(),
												t.Grayscale(num_output_channels=3)
												])

	aug = torchvision.transforms.RandomApply([t.ColorJitter(brightness=0.5, contrast=0.7),
											  t.RandomRotation(120),
											  t.RandomResizedCrop(224, scale=(0.6, 1.0), ratio=(0.75, 1.33),
																  interpolation=2),
											  t.RandomHorizontalFlip(),
											  t.RandomVerticalFlip(),
											  ], p=0.5)
	aug = t.Compose([aug, t.ToTensor()])

	loader = dataset_loader(config, transform, aug)

	# base
	Base = base(config, loader)


	# logger
	logger = Logger(os.path.join(config.save_path, 'logs', 'logging.txt'))
	logger(config)


	if config.mode == 'train':

		# automatically resume model from the latest one
		start_train_epoch = 0
		root, _, files = os_walk(Base.save_model_path)
		if len(files) > 0:
			# get indexes of saved models
			indexes = []
			for file in files:
				indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

			# keep only indexes for which every model in the list was saved
			model_num = len(Base.model_list)
			available_indexes = copy.deepcopy(indexes)
			for element in indexes:
				if indexes.count(element) < model_num:
					available_indexes.remove(element)

			available_indexes = sorted(set(available_indexes), reverse=True)
			unavailable_indexes = list(set(indexes).difference(set(available_indexes)))

			if len(available_indexes) > 0:  # resume model from the latest one
				Base.resume_model(available_indexes[0])
				start_train_epoch = available_indexes[0]
				logger('Time: {}, automatically resume training from the latest step (model {})'.
					   format(time_now(), available_indexes[0]))
			else:
				logger('Time: {}, there are no available models'.format(time_now()))

		# train loop
		for current_step in range(start_train_epoch, config.joint_training_steps):

			# save model every step. extra models will be automatically deleted for saving storage
			Base.save_model(current_step)

			# optionally evaluate reid every 10 steps, e.g. log the results of
			# test(config, Base, loader_target, True) when (current_step + 1) % 10 == 0
			logger('**********' * 10 + 'train' + '**********' * 10)
			train_titles, train_values, val_titles, val_values = train_a_ep(config, Base, loader, current_step)
			logger('Time: {};  Step: {};  {}'.format(time_now(), current_step, analyze_names_and_meter(train_titles, train_values)))
			logger('Time: {};  Step: {};  {}'.format(time_now(), current_step, analyze_names_and_meter(val_titles, val_values)))
			logger('')
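The resume block above scans the checkpoint directory and keeps only steps for which every model in Base.model_list was written. The same selection as a standalone function (the 'name_<step>.pkl' naming is taken from the example; the directory layout is otherwise assumed):

import os

def latest_complete_step(save_dir, model_num):
    """Return the newest step for which all `model_num` checkpoint files exist, or None."""
    steps = [int(f.replace('.pkl', '').split('_')[-1])
             for f in os.listdir(save_dir) if f.endswith('.pkl')]
    complete = [s for s in set(steps) if steps.count(s) == model_num]
    return max(complete) if complete else None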
Example no. 4
import os
import h5py
import numpy as np
import argparse
from core.feature_extraction import extract_feat
# from memory_profiler import profile
from pyprind import ProgBar
from core.base import base

b = base()

# class feature
# Returns a list of filenames for all JPEG images in a directory
# (superseded by base.getFileList, used below).
# def getImageList(path):
#     return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.JPEG')]


# command-line argument handling
def comdArgs():
    """
    Parse the command-line arguments.
    :return: list of training-set image paths and the feature index file path
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", required=True, help="path to the training set")
    ap.add_argument("-f", required=True, help="path of the feature index file")
    args = vars(ap.parse_args())
    return b.getFileList(args["d"], "JPEG"), args["f"]


# read the h5 file in the specified format
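The listing breaks off after this comment; a minimal sketch of such an h5 reader (the dataset names 'feats' and 'names' are assumptions for illustration, not taken from the original file):

def readH5(feat_file):
    # hypothetical reader; adjust the dataset names to the real file layout
    with h5py.File(feat_file, 'r') as h5f:
        feats = np.array(h5f['feats'])
        names = [n.decode() if isinstance(n, bytes) else n for n in h5f['names']]
    return feats, names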
Example no. 5
def main(config):

    # environments
    make_dirs(config.save_path)
    make_dirs(os.path.join(config.save_path, 'logs/'))
    make_dirs(os.path.join(config.save_path, 'model/'))
    make_dirs(os.path.join(config.save_path, 'features/'))
    make_dirs(os.path.join(config.save_path, 'results/'))
    make_dirs(os.path.join(config.save_path, 'images/'))
    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'

    # loaders
    transform = torchvision.transforms.Compose([
        t.ToPILImage(),
    ])
    aug = torchvision.transforms.RandomApply([
        t.ColorJitter(brightness=0.5, contrast=0.7),
        t.RandomRotation(120),
        t.RandomResizedCrop(
            224, scale=(0.6, 1.0), ratio=(0.75, 1.33), interpolation=2),
        t.RandomHorizontalFlip(),
        t.RandomVerticalFlip(),
    ], p=0.5)
    # random_hist = t.RandomApply([histeq()], p=0.5)
    aug = t.Compose([
        aug,
        t.ToTensor(),
        t.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    loader = stain_dataloader(config, transform, aug)

    # base
    Base = base(config, loader)

    # logger
    logger = Logger(
        os.path.join(config.save_path, 'logs', 'logging.txt'))
    csv_log = open(os.path.join(config.save_path, 'logs/csv_log.csv'), 'w',
                   newline='')  # newline='' avoids blank rows from csv.writer
    logger(config)

    if config.mode == 'train':

        # automatically resume model from the latest one
        start_train_epoch = 0
        root, _, files = os_walk(Base.save_model_path)
        if len(files) > 0:
            # get indexes of saved models
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

            # keep only indexes for which every model in the list was saved
            model_num = len(Base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)

            available_indexes = sorted(set(available_indexes), reverse=True)
            unavailable_indexes = list(
                set(indexes).difference(set(available_indexes)))

            if len(available_indexes) > 0:  # resume model from the latest one
                Base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), available_indexes[0]))
            else:
                logger('Time: {}, there are no available models'.format(
                    time_now()))

        # train loop
        for current_step in range(start_train_epoch,
                                  config.joint_training_steps):

            # save model every step. extra models will be automatically deleted for saving storage
            Base.save_model(current_step)

            logger('**********' * 10 + 'train' + '**********' * 10)
            train_titles, train_values, val_titles, val_values, test_titles, test_values, metric_bag = train_a_ep(
                config, Base, loader, current_step)
            logger('Time: {};  Step: {};  {}'.format(
                time_now(), current_step,
                analyze_names_and_meter(train_titles, train_values)))
            logger('Time: {};  Step: {};  {}'.format(
                time_now(), current_step,
                analyze_names_and_meter(val_titles, val_values)))
            for i, _ in enumerate(metric_bag):
                metric_bag[i] = round(metric_bag[i], 3)
            # metric_bag was rounded to 3 decimals above, so no extra formatting is needed
            logger(
                'Time: {};  Step: {};  AP:{}; AuC:{}, Precision:{}, Recall:{}, Sensitivity:{}, Specificity:{}, f1:{}'
                .format(time_now(), current_step, metric_bag[0], metric_bag[1],
                        metric_bag[2], metric_bag[3], metric_bag[4],
                        metric_bag[5], metric_bag[6]))
            logger('')
            list_all = analyze_meter_4_csv(val_titles, val_values) + metric_bag
            csv_writer = csv.writer(csv_log)
            csv_writer.writerow(list_all)
            csv_log.flush()  # persist the row now; closing here would break later writes
            if (current_step + 1) % 10 == 0:
                logger('**********' * 10 + 'test' + '**********' * 10)
                test_titles, test_values, metric_bag = test_a_ep(
                    config, Base, loader, current_step)
                logger('Time: {};  Step: {};  {}'.format(
                    time_now(), current_step,
                    analyze_names_and_meter(test_titles, test_values)))
                logger('')
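The CSV log above writes one metrics row per step but never a header row. A small self-contained pattern for such a metric log (the column names are illustrative assumptions):

import csv

def open_metric_log(path, columns):
    """Open a CSV metric log, write the header once, and return (file, writer)."""
    f = open(path, 'w', newline='')
    writer = csv.writer(f)
    writer.writerow(columns)
    return f, writer

log_file, writer = open_metric_log('csv_log.csv', ['AP', 'AuC', 'Precision', 'Recall'])
writer.writerow([0.91, 0.95, 0.88, 0.84])  # one row per training step (values illustrative)
log_file.close()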
Example no. 6
def main(config):

    # environments
    make_dirs(config.save_path)
    make_dirs(os.path.join(config.save_path, 'logs/'))
    make_dirs(os.path.join(config.save_path, 'model/'))
    make_dirs(os.path.join(config.save_path, 'dataset/'))
    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'

    # loaders
    transform = torchvision.transforms.Compose([
        XRayResizer(config.image_size),
        CLAHE(clip_limit=4.0, tile_grid_size=(4, 4)),
        t.ToPILImage(),
    ])

    aug = torchvision.transforms.RandomApply([
        t.ColorJitter(brightness=0.5, contrast=0.7),
        t.RandomRotation(120),
        t.RandomResizedCrop(config.image_size,
                            scale=(0.6, 1.0),
                            ratio=(0.75, 1.33),
                            interpolation=2),
        t.RandomHorizontalFlip(),
        t.RandomVerticalFlip(),
    ], p=0.5)
    aug = t.Compose([
        aug,
        ZscoreNormalize(),
        t.ToTensor(),
    ])

    loader = dataset_loader(config, transform, aug)
    # base
    Base = base(config, loader)
    # logger
    logger = Logger(
        os.path.join(config.save_path, 'logs', 'logging.txt'))
    logger(config)

    # automatically resume model from the latest one
    start_epoch = 0
    pathologies = loader.train_set.dataset.pathologies
    count_train = count_instance_num(loader.train_set)
    count_val = count_instance_num(loader.val_set)
    logger(('all_train', len(loader.train_set) + len(loader.val_set), 'class:',
            pathologies, '  num:', count_train + count_val))
    logger(('train:', len(loader.train_set), 'class:', pathologies, '  num:',
            count_train))
    logger(('validation:', len(loader.val_set), 'class:', pathologies,
            '  num:', count_val))
    logger(pathologies)

    root, _, files = os_walk(Base.save_model_path)
    if len(files) > 0:
        # get indexes of saved models
        indexes = []
        for file in files:
            indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

        # remove the bad-case and get available indexes
        model_num = len(Base.model_list)
        available_indexes = copy.deepcopy(indexes)
        for element in indexes:
            if indexes.count(element) < model_num:
                available_indexes.remove(element)

        available_indexes = sorted(list(set(available_indexes)), reverse=True)
        unavailable_indexes = list(
            set(indexes).difference(set(available_indexes)))

        # resume model from the latest one (skipped in '5fold' mode)
        if available_indexes and config.mode != '5fold':
            Base.resume_model(available_indexes[0])
            start_epoch = available_indexes[0]
            logger(
                'Time: {}, automatically resume training from the latest step (model {})'
                .format(time_now(), available_indexes[0]))
            logger('Time: {}, read train indices from /dataset'.format(
                time_now()))
            loader.train_set.indices = np.load(
                os.path.join(config.save_path, 'dataset', 'train.npy'))
            loader.train_set.dataset.idxs = np.load(
                os.path.join(config.save_path, 'dataset', 'train_idx.npy'))
            loader.train_set.dataset.labels = np.load(
                os.path.join(config.save_path, 'dataset', 'train_labels.npy'))

            loader.val_set.indices = np.load(
                os.path.join(config.save_path, 'dataset', 'test.npy'))
            loader.val_set.dataset.idxs = np.load(
                os.path.join(config.save_path, 'dataset', 'test_idx.npy'))
            loader.val_set.dataset.labels = np.load(
                os.path.join(config.save_path, 'dataset', 'test_labels.npy'))

            count_train = count_instance_num(loader.train_set)
            count_val = count_instance_num(loader.val_set)
            logger(('all: num:', count_train + count_val))
            logger(('train: num:', count_train))
            logger(('test: num:', count_val))
    else:
        logger('Time: {}, there are no available models'.format(time_now()))
        logger('Time: {}, writing train indices to /dataset/train.npy'.format(
            time_now()))
        logger('Time: {}, writing train indices to /dataset/train_idx.npy'.
               format(time_now()))
        logger('Time: {}, writing train indices to /dataset/train_labels.npy'.
               format(time_now()))
        logger('Time: {}, writing test indices to /dataset/test.npy'.format(
            time_now()))
        logger('Time: {}, writing test indices to /dataset/test_idx.npy'.
               format(time_now()))
        logger('Time: {}, writing test indices to /dataset/test_labels.npy'.
               format(time_now()))

        np.save(os.path.join(config.save_path, 'dataset', 'train.npy'),
                np.array(loader.train_set.indices))
        np.save(os.path.join(config.save_path, 'dataset', 'train_idx.npy'),
                np.array(loader.train_set.dataset.idxs))
        np.save(os.path.join(config.save_path, 'dataset', 'train_labels.npy'),
                loader.train_set.dataset.labels)
        np.save(os.path.join(config.save_path, 'dataset', 'test.npy'),
                np.array(loader.val_set.indices))
        np.save(os.path.join(config.save_path, 'dataset', 'test_idx.npy'),
                np.array(loader.val_set.dataset.idxs))
        np.save(os.path.join(config.save_path, 'dataset', 'test_labels.npy'),
                loader.val_set.dataset.labels)

    if config.mode == 'train':
        # get all the sample ids in the dataset
        dataset_to_split = list(range(len(loader.all_set)))
        # randomly split them into 5 folds
        train_ids_by_fold = []
        test_ids_by_fold = []
        test_cache = []
        for _ in range(5):
            train_cache = list(set(dataset_to_split) - set(test_cache))
            test_part = random.sample(train_cache,
                                      int(len(dataset_to_split) / 5))
            test_cache = test_cache + test_part
            train_part = list(set(dataset_to_split) - set(test_part))
            train_ids_by_fold.append(train_part)
            test_ids_by_fold.append(test_part)

        for fold_id in range(5):
            # re-initialize after final test
            start_epoch = 0
            Base = base(config, loader)
            loader.train_set.indices = train_ids_by_fold[fold_id]
            loader.val_set.indices = test_ids_by_fold[fold_id]

            logger('**********' * 3 + '5fold_train_fold_' + str(fold_id) +
                   '**********' * 3)
            for current_step in range(start_epoch,
                                      config.joint_training_steps):
                # save model every step. extra models will be automatically deleted for saving storage
                Base.save_model(current_step)
                logger('**********' * 3 + 'train' + '**********' * 3)
                train_titles, train_values = train_a_ep(
                    config, Base, loader, current_step)
                logger('Time: {};  Step: {};  {}'.format(
                    time_now(), current_step,
                    analyze_names_and_meter(train_titles, train_values)))
                logger('')
                if current_step % 3 == 0:
                    logger('**********' * 3 + 'test' + '**********' * 3)
                    test_titles, test_values, confusion_matrix, metric_values = test_a_ep(
                        config, Base, loader, current_step)
                    logger('Time: {};  Step: {};  {}'.format(
                        time_now(), current_step,
                        analyze_names_and_meter(test_titles, test_values)))
                    logger(
                        'Time: {};  Step: {};  acc:{:.3f}; Precision:{:.3f}, Recall:{:.3f}, f1:{:.3f}, Specificity:{:.3f}, FPR:{:.3f}'
                        .format(time_now(), current_step, metric_values[0],
                                metric_values[1], metric_values[2],
                                metric_values[3], metric_values[4],
                                metric_values[5]))
                    logger(confusion_matrix)
                    logger('')

    elif config.mode == 'test':
        logger('**********' * 3 + 'test' + '**********' * 3)
        test_titles, test_values, confusion_matrix, metric_values = test_a_ep(
            config, Base, loader, start_epoch)
        logger('Time: {};  Step: {};  {}'.format(
            time_now(), start_epoch,
            analyze_names_and_meter(test_titles, test_values)))

        logger(
            'Time: {};  Step: {};  acc:{:.3f}; Precision:{:.3f}, Recall:{:.3f}, f1:{:.3f}, Specificity:{:.3f}, FPR:{:.3f}'
            .format(time_now(), start_epoch, metric_values[0],
                    metric_values[1], metric_values[2], metric_values[3],
                    metric_values[4], metric_values[5]))
        logger(confusion_matrix)
        logger('')

    elif config.mode == 'localize':
        logger('**********' * 3 + 'localize' + '**********' * 3)
        masks = [
            os.path.join("./datasets/Localize2/Masks", i)
            for i in os.listdir("./datasets/Localize2/Masks")
        ]
        masks.sort()

        test_titles, test_values = localize_penumonia(config, Base, loader,
                                                      start_epoch)
        logger('Time: {};  Step: {};  {}'.format(
            time_now(), start_epoch,
            analyze_names_and_meter(test_titles, test_values)))
        # logger(confusion_matrix)
        logger('')
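The 5-fold split in this example is built by repeatedly sampling a fifth of the ids without replacement. The same partitioning can be written more compactly with scikit-learn's KFold (a swapped-in helper shown for comparison, not what the example uses):

import numpy as np
from sklearn.model_selection import KFold

def five_fold_ids(n_samples, seed=0):
    """Yield (train_ids, test_ids) for each of 5 folds."""
    kf = KFold(n_splits=5, shuffle=True, random_state=seed)
    for train_idx, test_idx in kf.split(np.arange(n_samples)):
        yield train_idx.tolist(), test_idx.tolist()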