Code Example #1
 def __init__(self) -> None:
     """Initialize the variables."""
     self.consts = UserPrefixConst()
     self.config = LoadConfig()
     self.logging = Logging()
     self.myclient = pymongo.MongoClient(self.config.mogo_db_info())
     self.mydb = self.myclient[self.config.mogo_db_name()]
Code Example #2
File: logger.py Project: MostafaTaheri/aws-python
class Logging:
    """Customise log information.

    Example:
        log = Logging()
        log.error(message=Exception)
    """
    def __init__(self):
        self.config = LoadConfig()
        self._basic_config()

    def error(self, message):
        """Records error log."""
        logging.error(message)

    def warning(self, message):
        """Records warning log."""
        logging.warning(message)

    def info(self, message):
        """Records info log."""
        logging.info(message)

    def _basic_config(self):
        """Sets basic config for logging."""
        logging.basicConfig(filename=self.config.logging_info(),
                            filemode=self.config.logging_mode(),
                            format=self.config.logging_format(),
                            datefmt=self.config.logging_date_format())
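
The LoadConfig object consumed throughout these examples is not shown on this page. Below is a minimal sketch of what it could look like, assuming an INI-backed configuration; the file name config.ini, the [logging] section, and all option names are assumptions, not taken from the project:

import configparser


class LoadConfig:
    """Hypothetical config loader backing the Logging class above."""

    def __init__(self, path='config.ini'):
        # Assumed INI layout: a [logging] section with file/mode/format/datefmt options
        self._parser = configparser.ConfigParser()
        self._parser.read(path)

    def logging_info(self):
        return self._parser.get('logging', 'file', fallback='app.log')

    def logging_mode(self):
        return self._parser.get('logging', 'mode', fallback='a')

    def logging_format(self):
        return self._parser.get('logging', 'format',
                                fallback='%(asctime)s %(levelname)s %(message)s')

    def logging_date_format(self):
        return self._parser.get('logging', 'datefmt', fallback='%Y-%m-%d %H:%M:%S')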
Code Example #3
 def __init__(self):
     """Initialize the variables."""
     self.consts = BucketConst()
     self.config = LoadConfig()
     self.access_key = self.config.bucket_access_key()
     self.secret_key = self.config.bucket_secret_key()
     self.endpoint = self.config.bucket_endpoint()
     self.service_name = self.config.bucket_service_name()
     self.logging = Logging()
     self.session = boto3.session.Session()
     self.client = self.session.client(
         service_name=self.service_name,
         aws_access_key_id=self.access_key,
         aws_secret_access_key=self.secret_key,
         endpoint_url=self.endpoint)
Code Example #4
    def onCREATE(self):
        """E.onCREATE()

        Create a new Environment. The Environment path given
        by self.path must not already exist.
        """

        # Create the directory structure
        os.makedirs(self.path)
        os.chdir(self.path)
        os.mkdir(os.path.join(self.path, "log"))
        os.mkdir(os.path.join(self.path, "conf"))

        # Create a few files
        createFile(os.path.join(self.path, "VERSION"), "%d" % self.version)
        createFile(os.path.join(self.path, "README"),
                   "This directory contains a %s Environment." % self.envname)

        # Setup the default configuration
        configfile = os.path.join(self.path, "conf", "%s.ini" % self.envname)
        createFile(configfile)
        self.config = Config(configfile)
        self.manager += self.config
        self.push(LoadConfig(), "load", "config")
        for section in CONFIG:
            if not self.config.has_section(section):
                self.config.add_section(section)
            for option, value in CONFIG[section].items():
                if isinstance(value, str):
                    value = value % {"name": self.envname}
                self.config.set(section, option, value)
        self.push(SaveConfig(), "save", "config")

        self.push(Created(), "created", self.channel)
Code Example #5
File: analysis.py Project: tlook/bapsfdataanalysis
 def __init__(self, configfile):
     self.c = LoadConfig(configfile)
     print(f'Loaded config: {configfile}')
     self.DATA_PATH_TEMP = f'{self.c.USER_DIR}{self.c.DATA_SRC_FILE}/{self.c.DATA_SRC_FILE}'
     self.RAW_SIG_PLOT_DATA_FILE = f'{self.DATA_PATH_TEMP}_raw_sigs.npy'
     self.FREQ_BIN_FILE = f'{self.DATA_PATH_TEMP}_freq_bin.npy'
     self.AVG_SPEC_FILE = f'{self.DATA_PATH_TEMP}_avg_spec.npy'
     self.PHI_FILE = f'{self.DATA_PATH_TEMP}_phis.npy'
     self.RMS_FILE = f'{self.DATA_PATH_TEMP}_rms.npy'
     self.S1_FILE = f'{self.DATA_PATH_TEMP}_s1s.npy'
     os.makedirs(f'{self.c.USER_DIR}{self.c.DATA_SRC_FILE}', exist_ok=True)
     self.signals, self.xyzs, self.t, self.dt = self.extract_raw_data()
     self.locs = np.array(
         [self.location(xyz[0], xyz[1]) for xyz in self.xyzs])
     print('Making directory structure...')
     for loc in self.locs:
         os.makedirs(self.save_dir(loc), exist_ok=True)
     print('Directories made')
     print('Loading Phis...')
     self.phis, self.s1s = self.extract_flucs()
     print('Loading spec')
     self.avg_spec, self.freq_bin = self.fluc_analysis()
     print('Loading rms')
     self.rms = self.rms_analysis()
     print(f'completed extracting data for {self.c.DATA_SRC_FILE}')
Code Example #6
    def onLOAD(self, verify=False):
        """E.onLOAD(verify=False)

        Load the Environment. Load the configuration and logging
        components. If verify=True, verify the Environment first.
        """

        if verify:
            self.push(Verify(), "verify", self.channel)

        os.chdir(self.path)

        # Create Config Component
        configfile = os.path.join(self.path, "conf", "%s.ini" % self.envname)
        self.config = Config(configfile)
        self.manager += self.config
        self.push(LoadConfig(), "load", "config")

        # Create Logger Component
        logname = self.envname
        logtype = self.config.get("logging", "type", "file")
        loglevel = self.config.get("logging", "level", "INFO")
        logfile = self.config.get("logging", "file", "/dev/null")
        logfile = logfile % {"name": self.envname}
        if not os.path.isabs(logfile):
            logfile = os.path.join(self.path, logfile)
        self.log = Logger(logfile, logname, logtype, loglevel)
        self.manager += self.log

        self.push(Loaded(), "loaded", self.channel)
Code Example #7
def LoginOTRS(TicketID):

    ## Stage Configuration file for Login Parameters
    client = LoadConfig.Stage()

    ## Create Session and Get Ticket Data
    client.session_create()
    ticket = client.ticket_get_by_id(TicketID, articles=True)

    return client, ticket
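
A minimal usage sketch for the helper above; the TicketID value is invented, and the client/ticket objects are whatever OTRS client library LoadConfig.Stage() returns:

client, ticket = LoginOTRS(12345)
print(ticket)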
Code Example #8
File: AlienVault.py Project: vaelwolf/iocspector
def Main(IPList, TicketID):
    conf = LoadConfig.Load()
    otx = OTXv2(conf["api_keys"]["AlienVaultAPI"])
    for IP in IPList:
        logging.info("[AlienVault] OTX Searching %s" % IP)
        result = pformat(
            otx.get_indicator_details_full(IndicatorTypes.IPv4, IP))

        otrs_functions.UpdateTicket("", "AlienVault OTX - %s Results" % IP,
                                    result, TicketID)
Code Example #9
File: main.py Project: MostafaTaheri/aws-python
def main():
    """Initializations and configurations."""
    global config
    config = LoadConfig()
    logging.basicConfig(filename=config.logging_info(),
                        filemode=config.logging_mode(),
                        format=config.logging_format(),
                        datefmt=config.logging_date_format())
    logging.info('App Started')
Code Example #10
def CreateTicket(SIEM_Events):
    conf = LoadConfig.Load()
    client = Client("%s" % conf['otrs']['server'], "%s" % conf['otrs']['user'],
                    "%s" % conf['otrs']['pass'])
    client.session_create()

    with open("siem_events.csv", "rt") as events:
        data = csv.reader(events)
        for event in data:
            ticket = Ticket.create_basic(Title=event[0],
                                         Queue=event[1],
                                         State=event[2],
                                         Priority=event[3],
                                         CustomerUser=event[4])
            article = Article({"Subject": event[5], "Body": event[6]})
            logging.info(client.ticket_create(ticket, article))
            sleep(30)
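
The loop indexes each CSV row positionally, so siem_events.csv must be ordered Title, Queue, State, Priority, CustomerUser, Subject, Body. A hypothetical row matching that assumed layout (all values invented):

Suspicious login,Raw,new,3 normal,customer@example.com,SIEM Alert,Multiple failed logins from one source IP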
Code Example #11
def RequestIP(IP):

    ## Load Configuration and set VirusTotal configuration
    conf = LoadConfig.Load()
    url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
    params = { 'apikey': conf["api_keys"]["VirusTotalAPI"], 'ip': IP }

    ## Send Request to VirusTotal and Retrieve Response
    response = requests.get(url, params=params)

    ## Check if Response is valid
    if response.status_code == 200:
        return response.json()

    ## If Response isn't valid (e.g. API limit exceeded), log this, wait a minute, then try again
    else:
        logging.info("VirusTotal API limit exceeded. Waiting 60 seconds to try again.")
        sleep(60)
        return RequestIP(IP)
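
Unbounded recursion can hit Python's recursion limit if the API stays throttled for a long time; the same behaviour written as a loop, reusing the assumed conf structure (a sketch, not the project's code):

def RequestIPLooped(IP):
    ## Hypothetical iterative variant of RequestIP
    conf = LoadConfig.Load()
    url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
    params = {'apikey': conf["api_keys"]["VirusTotalAPI"], 'ip': IP}
    while True:
        response = requests.get(url, params=params)
        if response.status_code == 200:
            return response.json()
        logging.info("VirusTotal API limit exceeded. Waiting 60 seconds to try again.")
        sleep(60)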
Code Example #12
 def Load(self, section=None):
     if sys.platform != "darwin":
         time.sleep(1)
     try:
         if section is None:
             LoadConfig(progress_handler=ProgressHandler(
                 self.AppendMessage, self.SetGauge, 0, 5))
             if config.connection is None:
                 wx.PostEvent(self, self.LoadedEvent(result=None))
                 return
         result = Load(
             ProgressHandler(self.AppendMessage, self.SetGauge, 5, 50))
     except Exception as e:
         traceback.print_exc()
         try:
             self.info.AppendText(str(e) + "\n")
         except Exception:
             self.info.AppendText("Error: " + repr(e) + "\n")
         result = False
Code Example #13
def CreateSocket():

    ## Load Configuration File
    conf = LoadConfig.Load()

    ## Log that IoCSpector is listening on the configuration-defined port
    logging.info(f"[IoCSpector] Listening on port {conf['LPort']}")

    ## Start Socket Listener on Port from Configuration Port
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((socket.gethostname(), conf['LPort']))
    s.listen(1)

    ## Receive Connections
    conn, address = s.accept()

    ## Start Multithreading to accept multiple incoming connections
    Thread(target=ProcessRequest, args=(conn, conf)).start()

    ## Close the Socket
    s.close()
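
As written, the listener accepts a single connection, hands it to a thread, and then closes the listening socket. If the intent of the multithreading comment is to serve multiple clients, an accept loop is the usual shape; a sketch reusing the same ProcessRequest and conf names (this rewrite is an assumption, not the project's code):

def CreateSocketLoop():
    ## Hypothetical variant that keeps accepting connections
    conf = LoadConfig.Load()
    logging.info(f"[IoCSpector] Listening on port {conf['LPort']}")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((socket.gethostname(), conf['LPort']))
    s.listen(5)
    try:
        while True:
            conn, address = s.accept()
            Thread(target=ProcessRequest, args=(conn, conf)).start()
    finally:
        s.close()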
Code Example #14
File: cam_app.py Project: yt7589/dcl
    def startup(self, args):
        i_debug = 18
        if 1 == i_debug:
            # Reserved feature development for the Wuxi institute bidding project
            app = WxsApp()
            app.startup(args)
            return
        print('Model heat map rendering application v0.1.0')
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'
        args = self.parse_args()
        # arg_dict = vars(args)
        args.train_num_workers = 0
        args.val_num_workers = 0
        print(args, flush=True)
        Config = LoadConfig(args, 'train')
        Config.cls_2 = args.cls_2
        Config.cls_2xmul = args.cls_mul
        assert Config.cls_2 ^ Config.cls_2xmul
        transformers = load_data_transformers(args.resize_resolution,
                                              args.crop_resolution,
                                              args.swap_num)
        # initial dataloader
        train_set = dataset(Config = Config,\
                            anno = Config.train_anno,\
                            common_aug = transformers["common_aug"],\
                            swap = transformers["swap"],\
                            swap_size=args.swap_num, \
                            totensor = transformers["train_totensor"],\
                            train = True)
        trainval_set = dataset(Config = Config,\
                            anno = Config.val_anno,\
                            common_aug = transformers["None"],\
                            swap = transformers["None"],\
                            swap_size=args.swap_num, \
                            totensor = transformers["val_totensor"],\
                            train = False,
                            train_val = True)
        val_set = dataset(Config = Config,\
                          anno = Config.val_anno,\
                          common_aug = transformers["None"],\
                          swap = transformers["None"],\
                            swap_size=args.swap_num, \
                          totensor = transformers["test_totensor"],\
                          test=True)
        dataloader = {}
        dataloader['train'] = torch.utils.data.DataLoader(train_set,\
                                                    batch_size=args.train_batch,\
                                                    shuffle=True,\
                                                    num_workers=args.train_num_workers,\
                                                    collate_fn=collate_fn4train if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['train'], 'total_item_len', len(train_set))
        dataloader['trainval'] = torch.utils.data.DataLoader(trainval_set,\
                                                    batch_size=args.val_batch,\
                                                    shuffle=False,\
                                                    num_workers=args.val_num_workers,\
                                                    collate_fn=collate_fn4val if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['trainval'], 'total_item_len', len(trainval_set))
        setattr(dataloader['trainval'], 'num_cls', Config.num_brands)
        dataloader['val'] = torch.utils.data.DataLoader(val_set,\
                                                    batch_size=args.val_batch,\
                                                    shuffle=False,\
                                                    num_workers=args.val_num_workers,\
                                                    collate_fn=collate_fn4test if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['val'], 'total_item_len', len(val_set))
        setattr(dataloader['val'], 'num_cls', Config.num_brands)
        cudnn.benchmark = True
        print('Choose model and train set', flush=True)
        model = MainModel(Config)

        # load model
        if (args.resume is None) and (not args.auto_resume):
            print('train from imagenet pretrained models ...', flush=True)
        else:
            if not args.resume is None:
                resume = args.resume
                print('load from pretrained checkpoint %s ...' % resume,
                      flush=True)
            elif args.auto_resume:
                resume = self.auto_load_resume(Config.save_dir)
                print('load from %s ...' % resume, flush=True)
            else:
                raise Exception("no checkpoints to load")

            model_dict = model.state_dict()
            pretrained_dict = torch.load(resume)
            print('train.py Ln193 resume={0};'.format(resume))
            pretrained_dict = {
                k[7:]: v
                for k, v in pretrained_dict.items() if k[7:] in model_dict
            }
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
        print('Set cache dir', flush=True)
        time = datetime.datetime.now()
        filename = '%s_%d%d%d_%s' % (args.cam, time.month, time.day, time.hour,
                                     Config.dataset)
        save_dir = os.path.join(Config.save_dir, filename)
        print('save_dir: {0} + {1};'.format(Config.save_dir, filename))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model.cuda()
        cam_main_model = model
        cam_model = model.model
        model = nn.DataParallel(model)
        # optimizer prepare
        if Config.use_backbone:
            ignored_params = list(map(id, model.module.classifier.parameters())) \
                        + list(map(id, model.module.brand_clfr.parameters()))
        else:
            ignored_params1 = list(
                map(id, model.module.classifier.parameters()))
            ignored_params1x = list(
                map(id, model.module.brand_clfr.parameters()))
            ignored_params2 = list(
                map(id, model.module.classifier_swap.parameters()))
            ignored_params3 = list(map(id, model.module.Convmask.parameters()))
            ignored_params = ignored_params1 + ignored_params1x + ignored_params2 + ignored_params3
        print('the num of new layers:', len(ignored_params), flush=True)
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.module.parameters())
        lr_ratio = args.cls_lr_ratio
        base_lr = args.base_lr
        momentum = 0.9
        if Config.use_backbone:
            optimizer = optim.SGD(
                [{
                    'params': base_params
                }, {
                    'params': model.module.classifier.parameters(),
                    'lr': base_lr
                }, {
                    'params': model.module.brand_clfr.parameters(),
                    'lr': base_lr
                }],
                lr=base_lr,
                momentum=momentum)
        else:
            optimizer = optim.SGD([
                {
                    'params': base_params
                },
                {
                    'params': model.module.classifier.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.brand_clfr.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.classifier_swap.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.Convmask.parameters(),
                    'lr': lr_ratio * base_lr
                },
            ],
                                  lr=base_lr,
                                  momentum=momentum)

        exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                               step_size=args.decay_step,
                                               gamma=0.1)
        # *******************
        # *******************
        print('model: {0};'.format(cam_model))
        print('avgpool: {0};'.format(cam_main_model.avgpool))
        headers = {
            'avgpool': cam_main_model.avgpool,
            'classifier': cam_main_model.brand_clfr
        }
        grad_cam = GradCam(model=cam_model, feature_module=cam_model[7], \
                       target_layer_names=["2"], headers=headers, use_cuda=True)
        # Read in the image data
        img = None
        img_file = '/media/ps/0A9AD66165F33762/yantao/dcl/support/ds_files/wxs_ds/head/car/d00/d00/d00/d00/d96/SC7168CH5_冀B591C5_02_120000100604_120000702916290242.jpg'
        with open(img_file, 'rb') as f:
            with Image.open(f) as img:
                img = img.convert('RGB')

        crop_reso = 224
        to_tensor = transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            # ImageNetPolicy(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        img_obj = to_tensor(img)
        input = img_obj.reshape(1, 3, 224, 224)
        input = input.cuda()  # Tensor.cuda() is not in-place; rebind the result
        input.requires_grad_(True)
        print('input: {0};'.format(input.shape))
        # If None, returns the map for the highest scoring category.
        # Otherwise, targets the requested index.
        target_index = None
        mask = grad_cam(input, target_index)
        #
        self.show_cam_on_image(img_file, mask)
        #
        gb_model = GuidedBackpropReLUModel(model=cam_main_model, use_cuda=True)
        gb = gb_model(input, index=target_index)
        gb = gb.transpose((1, 2, 0))
        cam_mask = cv2.merge([mask, mask, mask])
        cam_gb = self.deprocess_image(cam_mask * gb)
        gb = self.deprocess_image(gb)
        cv2.imwrite('gb.jpg', gb)
        cv2.imwrite('cam_gb.jpg', cam_gb)

        print('^_^ The End! 002 ^_^')
Code Example #15
    CreateSocket()


if __name__ == '__main__':

    ## Check if another instance is running and kill process if it is
    logging.info(
        f"[PID: {os.getpid()}] Checking if another instance is running...")
    try:
        self = Check.SingleInstance()
    except Exception:
        exit()

    ## Welcome Greeting
    logging.info("[IoCSpector] Welcome to IoCSpector!")
    logging.info("[IoCSpector] Author: Vaelwolf")
    logging.info("[IoCSpector] All rights reserved")

    ## Load Configuration File and iterate through values
    conf = LoadConfig.Load()
    for module, value in conf['modules'].items():
        if value[0]:
            logging.info(
                f"[+] Loaded Module '{module}' from configuration file")
        else:
            logging.info(
                f"[+] Skipping Module '{module}' from configuration file")

    ## Launch Main Socket Listener (starts program from this function)
    CreateSocket()
Code Example #16
File: train.py Project: chaseshen-ai/DCL
    choosed_w = weight_list[acc_list.index(max(acc_list))]
    return os.path.join(load_dir, choosed, choosed_w)


if __name__ == '__main__':
    args = parse_args()
    print(args, flush=True)
    # args.cls_mul=True
    # args.train_num_workers=True
    # args.resize_resolution=147
    # args.crop_resolution=129
    args.dataset = 'ItargeCar_0520_multi'
    args.use_backbone = False
    args.multi = True
    args.cls_mul = True
    Config = LoadConfig(args, 'train')
    Config.brand_relation = args.brand_relation
    Config.cls_2 = args.cls_2
    Config.cls_2xmul = args.cls_mul
    Config.log_dir = args.log_dir
    Config.no_loc = args.no_loc
    Config.add_images = args.add_images
    Config.size = (args.crop_resolution, args.crop_resolution)
    assert Config.cls_2 ^ Config.cls_2xmul

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # sw define
    sw_log = Config.log_dir
    sw = SummaryWriter(log_dir=sw_log)
Code Example #17
class Bucket:
    """uses operations of bucket.

    Example:
        bucket = Bucket()
        bucket.choose_operation(event_name='create_bucket',
            bucket_name='s3', region_name='us-west-2')
    """
    def __init__(self):
        """Initialize the variables."""
        self.consts = BucketConst()
        self.config = LoadConfig()
        self.access_key = self.config.bucket_access_key()
        self.secret_key = self.config.bucket_secret_key()
        self.endpoint = self.config.bucket_endpoint()
        self.service_name = self.config.bucket_service_name()
        self.logging = Logging()
        self.session = boto3.session.Session()
        self.client = self.session.client(
            service_name=self.service_name,
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key,
            endpoint_url=self.endpoint)

    async def choose_operation(self, event_name: AnyStr, **kwargs) -> Dict:
        """Chooses events based on event_name for executes async event."""
        try:
            if event_name == self.consts.create_by_region:
                return await asyncio.gather(
                    self._create_by_region(
                        bucket_name=kwargs.get("bucket_name"),
                        region=kwargs.get("region")))
            elif event_name == self.consts.create:
                return await asyncio.gather(
                    self._create(bucket_name=kwargs.get("bucket_name")))
        except Exception as Ex:
            self.logging.error(Ex)
            error = CustomException(self.consts.exception_status,
                                    self.consts.exception_message)
            return Tools.packer(status=error.fault_code,
                                message=error.fault_message)

    async def _create_by_region(self,
                                bucket_name: AnyStr,
                                region: AnyStr = None) -> Dict:
        """Creates an S3 bucket in a specified region.

        If a region is not specified, the bucket is created in the S3 default
        region (us-east-1).

        Parameters:
            bucket_name: The name of bucket to create.
            region: String region to create bucket in, e.g., 'us-west-2'.

        Returns:
            A dictionary of information.
        """
        try:
            self.location = {"LocationConstraint": region}
            self.client.create_bucket(Bucket=bucket_name,
                                      CreateBucketConfiguration=self.location)
        except ClientError as ClientException:
            self.logging.error(ClientException)
            error = CustomException(self.consts.exception_status,
                                    self.consts.exception_message)
            return Tools.packer(status=error.fault_code,
                                message=error.fault_message)
        else:
            return Tools.packer(
                status=self.consts.status_code.get("success"),
                message=self.consts.status_message.get("success"))

    async def _create(self, bucket_name: AnyStr) -> Dict:
        """Creates an S3 bucket.

        Parameters:
            bucket_name: The name of bucket to create.

        Returns:
            A dictionary of information.
        """
        try:
            self.client.create_bucket(Bucket=bucket_name)
        except ClientError as ClientException:
            self.logging.error(ClientException)
            error = CustomException(self.consts.exception_status,
                                    self.consts.exception_message)
            return Tools.packer(status=error.fault_code,
                                message=error.fault_message)
        else:
            return Tools.packer(
                status=self.consts.status_code.get("success"),
                message=self.consts.status_message.get("success"))

    def _get_bucket_list(self) -> List:
        """Gets list of buckets."""
        try:
            self.response = self.client.list_buckets()
            self.buckets = [
                bucket["Name"] for bucket in self.response['Buckets']
            ]
        except ClientError as ClientException:
            self.logging.error(ClientException)
            return None
        return self.buckets

    async def validation_bucket(self, bucket_name: AnyStr) -> bool:
        """Checks the conditions of bucket name and it is not exist.

        Parameters:
            bucket_name: The name of bucket

        Returns:
            True if all conditions hold, else False.
        """
        try:
            self.bucket_list = self._get_bucket_list()

            if len(bucket_name) < 3:
                return False
            if self.bucket_list and bucket_name in list(
                    filter(lambda x: x, self.bucket_list)):
                return False
            if not Tools.is_english(context=bucket_name):
                return False
            if Tools.is_invalid_character(context=bucket_name):
                return False
        except ClientError as ClientException:
            self.logging.error(ClientException)
            return False
        return True
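
choose_operation is a coroutine, so callers need an event loop. A minimal usage sketch; the bucket name and region are invented values:

import asyncio

bucket = Bucket()
result = asyncio.run(
    bucket.choose_operation(event_name=bucket.consts.create_by_region,
                            bucket_name='example-bucket',
                            region='us-west-2'))
print(result)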
Code Example #18
    log_progress(2500, 0.6, 0.36)
    log_progress(2600, 0.7, 0.49)
    log_progress(2700, 0.8, 0.64)
    log_progress(2800, 0.9, 0.81)
    log_progress(2900, 0.95, 0.95*0.95)
    log_progress(3000, 1.0, 1.0)
    '''
    print('Printing the network structure')
    summary(model, (3, 448, 448))

if __name__ == '__main__':
    i_debug = 10
    
    args = parse_args()
    print(args, flush=True)
    Config = LoadConfig(args, 'train')
    Config.cls_2 = args.cls_2
    Config.cls_2xmul = args.cls_mul
    assert Config.cls_2 ^ Config.cls_2xmul

    transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)

    # initial dataloader
    train_set = dataset(Config = Config,\
                        anno = Config.train_anno,\
                        common_aug = transformers["common_aug"],\
                        swap = transformers["swap"],\
                        totensor = transformers["train_totensor"],\
                        train = True)

    trainval_set = dataset(Config = Config,\
Code Example #19
if __name__ == '__main__':
    # Get params
    # target_example = 0  # Snake
    # (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
    #     get_example_params(target_example)
    # # Grad cam
    # grad_cam = GradCam(pretrained_model)
    # # Generate cam mask
    # cam = grad_cam.generate_cam(prep_img, target_class)
    # # Save mask
    # save_class_activation_images(original_image, cam, file_name_to_export)
    # print('Grad cam completed')

    args = parse_args()
    Config = LoadConfig(args, args.version)
    Config.cls_2 = True
    Config.cls_2xmul = False

    models = [
        'wide_resnet50_2', 'resnet50', 'resnext50_32x4d', 'se_resnext101_32x4d'
    ]
    weights = {
        'resnet50':
        'net_model/resnet50/weights_65_109_0.7044_0.8736.pth',
        'resnext50_32x4d':
        'net_model/resnext50_32x4d/weights_59_1549_0.7046_0.8772.pth',
        'se_resnext101_32x4d':
        'net_model/se_resnext101_32x4d/weights_18_4583_0.7152_0.8783.pth',
        'wide_resnet50_2':
        'net_model/wide_resnet50_2/weights_58_4051_0.7255_0.8865.pth'
Code Example #20
File: train.py Project: adrien1018/beta-tetris
            if (update + 1) % 2 == 0:
                self.set_optim(self.c.lr(), self.c.reg_l2())
                self.set_game_param(self.c.right_gain(), self.c.fix_prob(),
                                    self.c.neg_mul(), self.c.step_reward())
                self.set_weight_param(self.c.entropy_weight(),
                                      self.c.prob_reg_weight(),
                                      self.c.target_prob_weight(),
                                      self.c.gamma(), self.c.lamda())
            if (update + 1) % 25 == 0: logger.log()
            if (update + 1) % 200 == 0: experiment.save_checkpoint()


import argparse

if __name__ == "__main__":
    conf = LoadConfig()[0]
    m = Main(conf, args['name'])
    experiment.add_model_savers({
        'model':
        TorchSaver('model', m.model),
        'scaler':
        TorchSaver('scaler', m.scaler),
        'optimizer':
        TorchSaver('optimizer', m.optimizer),
    })
    if len(args['uuid']): experiment.load(args['uuid'], args['checkpoint'])
    with experiment.start():
        try:
            m.run_training_loop()
        except Exception as e:
            print(traceback.format_exc())
Code Example #21
                        type=int,
                        help='specify a range')
    parser.add_argument('--use-adam', action='store_true')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    print(args)

    args.version = 'val'
    if args.save_suffix == '':
        raise Exception('**** miss --ss save suffix is needed. ')

    Config = LoadConfig(args, args.version)

    if args.version == 'test':
        Config.rawdata_root = 'dataset/MTFood-1000/test'

    transformers = load_data_transformers(args.resize_resolution,
                                          args.crop_resolution, args.swap_num)
    data_set = dataset(Config,\
                       anno=Config.val_anno if args.version == 'val' else Config.test_anno ,\
                       common_aug=transformers["None"],\
                       swap=transformers["None"],\
                       totensor=transformers['val_totensor'],\
                       test=True)

    # dataloader = torch.utils.data.DataLoader(data_set,\
    #                                          batch_size=args.batch_size,\
Code Example #22
class UserPrefix:
    """Detects users and prefixes from mongoDB and validate that.

    Example:
        user_prefix = UserPrefix()
        response = user_prefix.user_prefix_validation(
            user_name='******', prefix='arvan12'
        )
    """
    def __init__(self) -> None:
        """Initialize the variables."""
        self.consts = UserPrefixConst()
        self.config = LoadConfig()
        self.logging = Logging()
        self.myclient = pymongo.MongoClient(self.config.mogo_db_info())
        self.mydb = self.myclient[self.config.mogo_db_name()]

    def _find_user(self, name: AnyStr) -> List:
        """Finds user information.

        Returns:
            A list with the query result.
        """
        try:
            self.mycollection = self.mydb[self.consts.collection_name.get(
                "users")]
            return [
                x for x in self.mycollection.find({
                    "name": name
                }, {
                    "id": 1
                }).limit(1)
            ]
        except Exception as Ex:
            self.logging.error(Ex)
            error = CustomException(self.consts.exception_status,
                                    self.consts.exception_message)
            return Tools.packer(status=error.fault_code,
                                message=error.fault_message)

    def _find_prefix(self, prefix: AnyStr) -> List:
        """Finds prefix information.

        Returns:
            A list with the query result.
        """
        try:
            self.mycollection = self.mydb[self.consts.collection_name.get(
                "prefixes")]
            return [
                x for x in self.mycollection.find({
                    "prefix": prefix
                }, {
                    "id": 1
                }).limit(1)
            ]
        except Exception as Ex:
            self.logging.error(Ex)
            error = CustomException(self.consts.exception_status,
                                    self.consts.exception_message)
            return Tools.packer(status=error.fault_code,
                                message=error.fault_message)

    def _find_user_prefix(self, query: Dict) -> List:
        """Finds user_prefix information.

        Returns:
            A list with the query result.
        """
        try:
            self.mycollection = self.mydb[self.consts.collection_name.get(
                "user_prefixes")]
            return [
                x for x in self.mycollection.find(query, {
                    "user_id": 1,
                    "prefix_id": 1,
                    "is_allowed": 1
                })
            ]
        except Exception as Ex:
            self.logging.error(Ex)
            return [
                CustomException(self.consts.exception_status,
                                self.consts.exception_message)
            ]

    async def user_prefix_validation(self, user_name: AnyStr,
                                     prefix: AnyStr) -> bool:
        """Validates allowed prefix for user.

        Returns:
            True if allowed else False.
        """
        try:
            self.user_id = self._find_user(name=user_name)[0]["id"]
            self.prefix = self._find_prefix(prefix=prefix)
            self.prefix_id = 0

            if not self.prefix and user_name == \
                    UserPrefixConst.exception_users[0]:
                return True
            else:
                self.prefix_id = self.prefix[0]["id"]

            self.query = dict({
                '$and': [{
                    'user_id': self.user_id
                }, {
                    'prefix_id': self.prefix_id
                }]
            })

            self.user_prefix = self._find_user_prefix(query=self.query)

            if user_name not in self.consts.exception_users and \
                    self.consts.Exception_prefix in prefix[0:5]:
                return False
            elif self.query is None and user_name != \
                    self.consts.exception_users[0]:
                return False
            elif list(
                    filter(lambda x: x["is_allowed"] is False,
                           self.user_prefix)):
                return False
            else:
                return True
        except Exception as Ex:
            self.logging.error(Ex)
            return False
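
user_prefix_validation is likewise a coroutine; a minimal call sketch, with invented username and prefix values:

import asyncio

user_prefix = UserPrefix()
allowed = asyncio.run(
    user_prefix.user_prefix_validation(user_name='alice', prefix='arvan12'))
print(allowed)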
Code Example #23
                    type=int, help='specify a range')
    parser.add_argument('--use-adam', action='store_true')
    parser.add_argument('--cls_2', dest='cls_2', action='store_false')
    parser.add_argument('--cls_mul', dest='cls_mul', action='store_true')
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()
    print(args)

    args.version = 'test'
    if args.save_suffix == '':
        raise Exception('**** miss --ss save suffix is needed. ')

    Config = LoadConfig(args, args.version)
    Config.cls_2 = args.cls_2
    Config.cls_2xmul = args.cls_mul

    if args.version == 'test':
        Config.rawdata_root = 'dataset/MTFood-1000/data/test'

    transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)
    data_set = dataset(Config,\
                       anno=Config.val_anno if args.version == 'val' else Config.test_anno ,\
                       common_aug=transformers["None"],\
                       swap=transformers["None"],\
                       totensor=transformers['val_totensor'],\
                       test=True)

    # dataloader = torch.utils.data.DataLoader(data_set,\
Code Example #24
File: logger.py Project: MostafaTaheri/aws-python
 def __init__(self):
     self.config = LoadConfig()
     self._basic_config()
Code Example #25
                        metavar=('swap1', 'swap2'),
                        type=int,
                        help='specify a range')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    print(args)
    if args.submit:
        args.version = 'test'
        if args.save_suffix == '':
            raise Exception('**** miss --ss save suffix is needed. ')

    Config = LoadConfig(args, args.version)
    transformers = load_data_transformers(args.resize_resolution,
                                          args.crop_resolution, args.swap_num)
    data_set = dataset(Config,\
                       anno=Config.val_anno if args.version == 'val' else Config.test_anno ,\
                       unswap=transformers["None"],\
                       swap=transformers["None"],\
                       totensor=transformers['test_totensor'],\
                       test=True)

    dataloader = torch.utils.data.DataLoader(data_set,\
                                             batch_size=args.batch_size,\
                                             shuffle=False,\
                                             num_workers=args.num_workers,\
                                             collate_fn=collate_fn4test)
Code Example #26
            result_path = phone_paths 

    pos_imgs = []
    for p in result_path:
        files = os.listdir(p)
        tmp = [os.path.join(p, x) for x in files if os.path.splitext(x)[-1].lower() in ['.jpg', '.jpeg', '.png']]
        print(p, ': ', len(tmp))
        pos_imgs += tmp

    return pos_imgs

if __name__ == '__main__':
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    args = parse_args()
    print(args)    
    Config = LoadConfig(args, args.version)
    Config.use_dcl = False
    Config.use_backbone = True
    cudnn.benchmark = True
    resume = '/media/ps/0A9AD66165F33762/yantao/dcl/net_model/training_descibe_102013_CUB/weights_1_75667_0.9632_0.9796.pth'
    model = MainModel(Config)
    model_dict=model.state_dict()
    pretrained_dict=torch.load(resume)
    pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    model.cuda()
    model.train(False)
    model.eval()
    run_mode = 1 # 1 dynamic; 2 static
    if 2 == run_mode:
Code Example #27
                        type=float)
    parser.add_argument('--start_epoch',
                        dest='start_epoch',
                        default=0,
                        type=int)
    parser.add_argument('--tnw', dest='train_num_workers', default=8, type=int)
    parser.add_argument('--vnw', dest='val_num_workers', default=8, type=int)
    parser.add_argument('--detail', dest='discribe', default='', type=str)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    print('\nargs:  ', args, '\n', '========' * 6)
    Config = LoadConfig(args, 'train')
    print('Config:  ', vars(Config), '\n', '========' * 6)

    model = TransModel(
        layer_num=2,
        head_num=4,
        dk_num=64,
        dv_num=64,
        model_num=2048,
        inner_num=512,
    )
    model.cuda()
    model = nn.DataParallel(model)
    train_set = Featset(sample='full')
    train_loader = torch.utils.data.DataLoader(
        train_set,
Code Example #28
    # args.discribe='feature'
    # args.resize_resolution=147
    # args.crop_resolution=129
    # # args.anno="/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/inference_set.csv"
    # args.result_path="/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/"
    # args.feature=True
    print(args)
    print(args.anno)
    # # todo: debug
    # args.anno = "/NAS/shenjintong/Dataset/ItargeCar/class_originbox/test_info.csv"
    # args.resume= "/NAS/shenjintong/DCL/net_model/DCL_512_448_41123_ItargeCar/model_best.pth"
    # args.CAM=True
    # args.opencv_save=True


    Config = LoadConfig(args, args.version)
    Config.cls_2xmul = True
    Config.cls_2 = False
    Config.no_loc = args.no_loc
    # sw define
    Config.size=(args.crop_resolution,args.crop_resolution)
    if args.log_dir:
        sw_log = args.log_dir
        sw = SummaryWriter(log_dir=sw_log)

    transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)

    # Since args.version only auto-selects which annotation file to read, drop the version setting and use a file path directly
    if args.anno:
        dataset_pd = pd.read_csv(args.anno)
    else:
Code Example #29
File: main.py Project: MostafaTaheri/aws-python
def main():
    """Initializations and configurations."""
    global logging, load_config
    logging = Logging()
    load_config = LoadConfig()
    logging.info('App started')