def main(client):
	# file_list = ['ableton__26_12_2019__12_33_10.html','argireline__26_12_2019__16_00_19.html']
	for S3_REGION in S3_REGION_LIST:
		print(S3_REGION)
		output_dir = BATCH_OUTPUT_DIR_MOBILE.format(client)
		INPUT_DIR = os.path.join(output_dir, S3_REGION, 'page_source')
		OUTPUT_DIR = os.path.join(output_dir, S3_REGION,'showcase_result_computed')

		if os.path.exists(INPUT_DIR):
			create_dir(OUTPUT_DIR)
			for file in os.listdir(INPUT_DIR):
			# for file in file_list:
				# print(file)

				file_path = os.path.join(INPUT_DIR, file)
				with open(file_path, 'r') as f:
					page_data = f.read()

					soup = BeautifulSoup(page_data,'html.parser')
					query_clean_name = file.split('.')[0]
					get_showcase_ad_details(OUTPUT_DIR, soup, query_clean_name)
		else:
			print('{} -- No showcase results for Mobile step 5.a'.format(S3_REGION))

		print('--------------------------------------------')
def main(client):

    # file_list = ['ableton__26_12_2019__26_12_2019__16_01_08.html','acqua_di_parma__26_12_2019__20_01_05.html','becca_highlighter__26_12_2019__20_01_07.html']
    for S3_REGION in S3_REGION_LIST:
        print(S3_REGION)
        output_dir = BATCH_OUTPUT_DIR_MOBILE.format(client)

        INPUT_DIR = os.path.join(output_dir, S3_REGION, 'page_source')
        OUTPUT_DIR = os.path.join(output_dir, S3_REGION,
                                  'sponsored_result_computed')

        if os.path.exists(INPUT_DIR):
            create_dir(OUTPUT_DIR)
            for file in os.listdir(INPUT_DIR):
                # for file in file_list:
                # print(file)

                file_path = os.path.join(INPUT_DIR, file)
                with open(file_path, 'r') as f:
                    page_data = f.read()

                    soup = BeautifulSoup(page_data, 'html.parser')
                    query_clean_name = file.split('.')[0]
                    get_sponsored_ad_details(OUTPUT_DIR, soup,
                                             query_clean_name)
        else:
            print('{} -- No sponsored results for Mobile step 4.a'.format(
                S3_REGION))

        print('--------------------------------------------')
Example #3
def get_second_page_data(debug=False):
    """#get second page data, and save result to file ./api-data/second_page_data.json"""

    t0 = time.time()
    helper.remove_file(config.FILENAME_SECOND_PAGE_DATA)
    helper.remove_file(config.FILENAME_SECOND_PAGE_SECTIONIDS_DATA)
    helper.create_dir(config.SAVE_FILE_DIR)
    helper.my_print("total has %d grades...\n" % len(config.GRADES), debug)

    for grade in config.GRADES:
        time.sleep(config.SLEEP_TIME)
        helper.my_print("%s:" % grade, debug)

        url = helper.get_second_page_url(grade=grade)
        content = helper.get_url_content(url)  # fetch the knowledge-chapter list
        if content is None:
            helper.my_print_error("the content is None.")
        else:
            helper.save_content_to_file(config.FILENAME_SECOND_PAGE_DATA,
                                        content)
            helper.my_print(content, debug)

            # collect the section IDs of each knowledge chapter
            json_data = json.loads(content)
            for course_list in json_data['list']:
                for course in course_list['list']:
                    section_id = course['courseSectionID']
                    helper.save_content_to_file(
                        config.FILENAME_SECOND_PAGE_SECTIONIDS_DATA,
                        section_id)
                    helper.my_print(section_id, debug)

        helper.my_print("", debug)

    print('Done in %f seconds.' % (time.time() - t0))
Example #4
def init():
    # Initialize the output directory
    h.create_dir(OUPTPUT_FOLDER_PATH)

    # Initialize the workbook
    workbook = Workbook(write_only=False)
    worksheets = []
    shipments = []
    for i in xrange(SHIPMENTS):
        worksheets.append(
            Worksheet(workbook.create_sheet("Shipment%d" % (i + 1), i)))
        shipments.append(Shipment(i))
    return (workbook, worksheets, shipments)
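A brief, hypothetical continuation of init(): it assumes the populated workbook is written out with openpyxl's Workbook.save, reuses the OUPTPUT_FOLDER_PATH constant from the snippet above, and the output filename is purely illustrative.

# Hypothetical follow-up to init(); 'shipments.xlsx' is an illustrative filename.
import os

workbook, worksheets, shipments = init()
# ... populate each wrapped worksheet from its Shipment here ...
workbook.save(os.path.join(OUPTPUT_FOLDER_PATH, "shipments.xlsx"))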
Example #5
    def CopyFileToDestinationDir(self):
        """
		Copies computed file to mounted s3 location on ec2
		"""
        src_file_path = os.path.join(
            BASE_DIR, ADS_COMPUTED_OUTPUT_FILE_PATH.format(self.client))
        client_output_dir = DESTINATION_ROOT_PATH.format(self.client)
        create_dir(client_output_dir)
        dest_file_path = os.path.join(client_output_dir,
                                      ADS_COMPUTED_OUTPUT_FILE)
        if os.path.exists(src_file_path):
            copy_file(src_file_path, dest_file_path)
            return dest_file_path
        else:
            return None
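Since CopyFileToDestinationDir returns the destination path on success and None when the source file is missing, a caller can branch on the result. A minimal sketch, where the `job` instance name is an assumption:

# Hypothetical caller; `job` is an instance of the class that defines CopyFileToDestinationDir.
dest = job.CopyFileToDestinationDir()
if dest is None:
    print('Computed ads file not found; nothing was copied.')
else:
    print('Copied computed ads file to {}'.format(dest))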
Example #6
 def run_tests(self):
     self.learner.set_epsilon(0.0)
     self.episode_dir = os.path.join(self.run_dir,
                                     'episode_' + str(self.current_episode))
     self.episode_dir = helper.create_dir(self.episode_dir)
     self.test_steps = []
     self.test_rewards = []
     for test_pos in self.params['test_positions']:
         self.init_episode()
         self.run_episode(test_pos, tuple(self.current_task['goal_pos']),
                          self.current_task['name'])
         self.test_steps.append(self.steps_in_episode)
         self.test_rewards.append(self.reward_in_episode)
     self.write_test_results()
     self.learner.save_Qs(os.path.join(self.episode_dir, 'Qs.npy'))
     # Make video from random position
     if self.params['visual']:
         self.set_status('recording')
         self.init_episode()
         self.run_episode(
             self.env.get_random_state(tuple(
                 self.current_task['goal_pos'])),
             tuple(self.current_task['goal_pos']),
             self.current_task['name'])
     self.learner.set_epsilon(self.learner.last_epsilon)
Example #7
def get_main_page_data(debug=False):
    """#get main page data, and save the result to file ./api-data/main_page_data.json"""

    helper.remove_file(config.FILENAME_MAIN_PAGE_DATA)  # remove any existing file first, then save a fresh copy
    helper.create_dir(config.SAVE_FILE_DIR)

    t0 = time.time()
    url = helper.get_main_page_url()
    content = helper.get_url_content(url)  # fetch the top-level main-page lists (grades, carousel, study plans, etc.)
    if content is None:
        helper.my_print_error("main_page_data is None.")
    else:
        helper.save_content_to_file(config.FILENAME_MAIN_PAGE_DATA, content)
        helper.my_print(content, debug)

    print('Done in %f seconds.' % (time.time() - t0))
Example #8
    def get_software_download_dir(self):
        """Get the directory used to download the OCP software bits."""
        default = '/home/ansible/files'
        self.software_dir = '/home/ansible/files'
        self.software_dir = set_values(self.software_dir, default)
        dest_path_exist = check_path(self.software_dir, isdir=True)
        if dest_path_exist:
            logging.info('directory {} already exists'.format(self.software_dir))
        else:
            logging.info('Creating directory {}'.format(self.software_dir))
            create_dir(self.software_dir)

        self.inventory_dict['all']['vars']['software_src'] = self.software_dir
Example #9
def get_third_page_data(debug=False):
    """#get third page data, and save result to file ./api-data/third_page_data.json"""

    t0 = time.time()
    helper.remove_file(config.FILENAME_THIRD_PAGE_DATA)
    helper.remove_file(config.FILENAME_THIRD_SECTIONIDS_DATA)
    helper.create_dir(config.SAVE_FILE_DIR)

    with open(config.FILENAME_SECOND_PAGE_SECTIONIDS_DATA) as f:
        i = 0
        lines = f.readlines()
        helper.my_print("total has %d chapters...\n" % len(lines), debug)

        for line in lines:
            i += 1
            section_id = str(int(line))
            helper.my_print("line:%d sectionID:%s" % (i, section_id), debug)

            time.sleep(config.SLEEP_TIME)
            url = helper.get_third_page_url(section_id)
            content = helper.get_url_content(
                url)  # fetch the knowledge-point list for this chapter's sectionID
            if content is None:
                helper.my_print_error("the content is None.")
            else:
                helper.save_content_to_file(config.FILENAME_THIRD_PAGE_DATA,
                                            content)
                helper.my_print(content, debug)

                # collect the course/section IDs for each knowledge point (used later to fetch the question lists)
                json_data = json.loads(content)
                for course in json_data['list']:
                    course_dic = {
                        'courseSectionID': course['courseSectionID'],
                        'sectionName': course['sectionName'],
                        'parentID': course['parentID']
                    }

                    data = json.dumps(course_dic, ensure_ascii=False)
                    helper.save_content_to_file(
                        config.FILENAME_THIRD_SECTIONIDS_DATA, data)
                    helper.my_print(data, debug)

            helper.my_print("", debug)

    print('Done in %f seconds.' % (time.time() - t0))
Example #10
 def clone_repo(self, repo, repo_dir):
     """
     Clones the repo passed as an argument into the repo_dir directory. Note that this method clones a shallow copy
     of the repo (depth=1, branch=master).
     :param repo: repository to be cloned
     :param repo_dir: directory where repository will be cloned
     :return:
     """
     project_dir = os.path.join(repo_dir, repo['project'])
     helper.create_dir(project_dir)
     clone_url = repo['cloneUrl'].split('@')[0] + ':' + urllib.quote(self.credentials['password'],
                                                              safe='') + '@' + \
                 repo['cloneUrl'].split('@')[1]
     command = "git clone --depth 1 -b master %s repos/%s/%s" % (
         pipes.quote(clone_url), pipes.quote(
             repo['project']), pipes.quote(repo['slug']))
     os.system(command)
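Note that the os.system call above interpolates a URL that embeds the password into a shell string. A minimal sketch of the same shallow clone done through subprocess instead (an alternative, not the original author's method), which passes arguments as a list so nothing is re-parsed by the shell:

# Sketch only: shallow clone via subprocess; clone_url/project/slug mirror the values built above.
import subprocess

def clone_repo_shallow(clone_url, project, slug):
    dest = "repos/%s/%s" % (project, slug)
    subprocess.check_call(["git", "clone", "--depth", "1", "-b", "master", clone_url, dest])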
Example #11
    def get_software_download_dir(self):
        """ 
        get software download directory to download OCP 4.3 software bits
  
        """
        self.clear_screen()
        default = '/home/ansible/files'
        self.software_dir = input('provide complete path of directory to download OCP 4.3 software bits\n'
                                  'default [/home/ansible/files]: ')
        self.software_dir = set_values(self.software_dir, default)
        dest_path_exist = check_path(self.software_dir, isdir=True)
        if dest_path_exist:
            logging.info('directory {} already exists'.format(self.software_dir))
        else:
            logging.info('Creating directory {}'.format(self.software_dir))
            create_dir(self.software_dir)

        self.inventory_dict['csah']['vars']['software_src'] = self.software_dir
Example #12
 def init_run(self):
     _logger.info("..... Starting run %s" % str(self.current_run))
     run_dir = os.path.join(self.task_dir, 'run_' + str(self.current_run))
     self.run_dir = helper.create_dir(run_dir)
     # Create run stats file: run_stats.csv
     self.run_stats_file = os.path.join(self.run_dir, 'stats_run.csv')
     self.run_steps = 0
     helper.write_stats_file(self.run_stats_file, 'episode', 'steps_total',
                             'steps_mean', 'reward_total', 'reward_mean',
                             'epsilon', 'step_count')
     self._init_run()
Example #13
    def run(self):
        global S3_BUCKET_DICT

        for S3_REGION in S3_REGION_LIST:
            BUCKET = S3_BUCKET_DICT[S3_REGION]
            OUTPUT_DIR = os.path.join(self.batch_output_dir, S3_REGION)
            create_dir(OUTPUT_DIR)

            # For thread safety
            session = boto3.session.Session()

            if SERVER:
                s3 = session.resource('s3')
            else:
                s3 = session.resource(
                    's3',
                    aws_access_key_id=AWS_ACCESS_KEY_ID,
                    aws_secret_access_key=AWS_SECRET_ACCESS_KEY)

            print('{}: {}'.format(self.thread_name, BUCKET))
            bucket = s3.Bucket(BUCKET)
            files = bucket.objects.filter(Prefix=S3_PREFIX_PC)

            for file in files:
                file_key = file.key
                if '_mobile/' not in file_key and CURRENT_DATE_STAMP in file_key:
                    if 'sponsored_result' in file_key and self.client_name in file_key:
                        # print('{}: {}'.format(self.thread_name,file_key))
                        # print(file_key)
                        file_dir = file_key.split('/')[-2]
                        file_path = os.path.join(OUTPUT_DIR, file_dir)
                        create_dir(file_path)
                        file_name = file_key.split('/')[-1]
                        download_file_path = os.path.join(file_path, file_name)
                        if not os.path.exists(download_file_path):
                            obj = s3.Object(
                                BUCKET,
                                file_key).download_file(download_file_path)

        print('{} Completed'.format(self.thread_name))
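run() above reads like the body of a threading.Thread subclass: it relies on self.thread_name, self.client_name and self.batch_output_dir. A minimal sketch of how such per-client downloader threads might be started and joined; the S3PageSourceDownloader class name and its constructor signature are assumptions.

# Hypothetical launcher; the client names are illustrative (taken from the commented file lists above).
threads = []
for client in ['ableton', 'argireline']:
    t = S3PageSourceDownloader(thread_name='downloader-{}'.format(client), client_name=client)
    t.start()
    threads.append(t)
for t in threads:
    t.join()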
Example #14
    def preprocess(self):
        """
        Naive preprocessor that creates the dataset for CLAMS by cloning all repos and filtering locally.
        :return:
        """
        repos_dir = os.path.join(os.getcwd(), 'repos')
        helper.create_dir(repos_dir)

        bitbucket_client = BitBucketServerClient(
            host=self.bitbucket_host,
            is_ssh=False,
            credentials=self.bitbucket_credentials)
        repos = bitbucket_client.get_bitbucket_server_repos(self.client_repos)
        self.clone_repos(bitbucket_client, repos, repos_dir)

        for project in projects_map:
            package_name = projects_map[project]['package']

            print "Removing previous session's results..."
            directory = os.path.join(os.getcwd(), 'files', project)
            helper.delete_dir(directory)
            helper.create_dir(directory)
            print "Ready to run new session!\n"

            # use the following command to filter files
            os.system("find ./repos -iname '*.java' | xargs -n16 -P8 grep -l" +
                      " \"" + package_name + "\" > " + project + ".txt")

            fname = project + ".txt"
            files_urls = {}
            if os.path.exists(fname):
                self.process_filtered_results(directory, files_urls, fname,
                                              project)

                print 'Writing files\' BitBucket Server urls to file...'
                helper.write_json(files_urls, 'files_urls', directory)
                print 'Files\' BitBucket urls are now stored in a json file!\n'
            else:
                print 'No usage examples found for ' + project
Example #15
    def preprocess(self):
        """
        This method runs the preprocessor that creates the dataset for CLAMS by using Hound to filter down to specific files.
        :return:
        """
        for project in projects_map:
            package_name = projects_map[project]['package']

            print "Removing previous session's results..."
            directory = os.path.join(os.getcwd(), 'files', project)
            helper.delete_dir(directory)
            helper.create_dir(directory)
            print "Ready to run new session!\n"

            # search on Hound
            print "\nSearching on Hound..."
            hound_client = HoundClient(self.hound_host, self.hound_credentials)
            hound_query = {'q': 'import ' + package_name, 'i': 'nope', 'files': '.java', 'repos': '*'}
            json_response = hound_client.search(hound_query)
            files_urls = self.parse_hound_response(project, json_response)
            print "Search completed!\n"

            # download files from BitBucket Server
            print "Downloading files..."
            bitbucket_client = BitBucketServerClient(host=self.bitbucket_host, is_ssh=self.is_bitbucket_ssh,
                                                     credentials=self.bitbucket_credentials)
            for file_name, info in files_urls.iteritems():
                response = bitbucket_client.download_file(info)
                helper.write_file_content(response, file_name, directory, self.is_bitbucket_ssh)
            print "Files are now stored locally!\n"

            print 'Writing files\' BitBucket Server urls to file...'
            helper.write_json(files_urls, 'files_urls', directory)
            print 'Files\' BitBucket Server urls are now stored in a json file!\n'
            # sleep for 1s to avoid overloading Hound/BitBucket
            # remove in case you don't have any latency issues
            time.sleep(1)
Example #16
 def init_task(self):
     _logger.info("##### Starting task %s" % str(self.current_task['name']))
     task_dir = os.path.join(self.exp_dir,
                             'task_' + self.current_task['name'])
     self.task_dir = helper.create_dir(task_dir)
     self._init_task()
Example #17
def get_question_data(debug=False, start=1, count=0):
    """#get question data, and save result to file ./api-data/question_data.json"""

    t0 = time.time()
    with open(config.FILENAME_THIRD_SECTIONIDS_DATA) as f:
        i = 0
        lines = f.readlines()
        line_count = len(lines)

        # validate the "start" index parameter
        try:
            start_index = int(start)
        except ValueError as e:
            helper.my_print_error(
                'Error: %s\n the "start" param must be a Integer number!' % e)
            return
        else:
            if start_index < 1 or start_index > line_count:
                helper.my_print_error(
                    'Error:  the "start" param must in range(1, len(lines)+1)')
                return

        # validate the "count" parameter (number of entries to fetch)
        try:
            limit_count = int(count)
        except ValueError as e:
            helper.my_print_error(
                'Error: %s\n the "count" param must be a Integer number!' % e)
            return
        else:
            if limit_count <= 0 or (start_index - 1 +
                                    limit_count) > line_count:
                limit_count = line_count - start_index + 1

        if start_index == 1:  # start_index == 1 means a fresh run, so remove any previously saved data file first
            helper.remove_file(config.FILENAME_QUESTION_DATA)

        helper.create_dir(config.SAVE_FILE_DIR)
        helper.my_print("total has %d sections...\n" % line_count, debug)

        for line in lines:
            i += 1
            if i < start_index:
                continue
            elif i >= (start_index + limit_count):
                break

            json_data = json.loads(line)
            section_id = json_data['courseSectionID']
            helper.my_print("line:%d sectionID:%s" % (i, section_id), debug)

            time.sleep(config.SLEEP_TIME)
            url = helper.get_question_url(section_id)
            content = helper.get_url_content(url)  # fetch the question list for this knowledge point's sectionID
            if content is None:  # exit immediately if an exception occurred
                helper.my_print_error("the content is None.")
                helper.my_print_error(
                    "An Exception has happen. get_question_data will be exit. "
                    "the next start_index should be %d" % i)
                break
            else:
                result_data = json.loads(content)
                if result_data['code'] != '99999':  # exit immediately if the return code does not indicate success
                    helper.my_print_error(
                        "result code is not 99999. get_question_data will be exit. "
                        "the next start_index should be %d" % i)
                    break

                helper.save_content_to_file(config.FILENAME_QUESTION_DATA,
                                            content)
                helper.my_print(content, debug)
                helper.my_print("", debug)

    print('Done in %f seconds.' % (time.time() - t0))
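Taken together, the helpers above form a small scraping pipeline: main page, then chapters, then knowledge points, then questions. A hedged sketch of running them end to end, assuming they live in one module (the module name `crawler` is an assumption) and using the resume behaviour described by the error messages in get_question_data:

# Hypothetical end-to-end run; `crawler` is an assumed module name.
import crawler

crawler.get_main_page_data(debug=True)                    # grades and other top-level lists
crawler.get_second_page_data(debug=True)                  # chapters and their section IDs
crawler.get_third_page_data(debug=True)                   # knowledge points per chapter
crawler.get_question_data(debug=True, start=1, count=0)   # count <= 0 means "fetch everything remaining"
# If the question fetch aborts, rerun with the start index printed in the error message, e.g.:
# crawler.get_question_data(debug=True, start=57, count=0)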
Example #18
def main(all_datasets, gpu, epoch):

    device = torch.device('cuda:{}'.format(gpu))

    attack_names = ['DeepFool', 'CarliniWagner']

    for attack_name in attack_names:
        csv_rows = []
        for dataset in all_datasets:
            ds_obj, datasets, data_loaders = \
                hp.get_data_loder_objects(dataset, PHASES, **hp.get_loader_kwargs(batch_size))

            for dir_name in os.listdir('../{}/adversarial_images/'.format(
                    ds_obj.name)):
                # dir_name contains model name and other params, process them here
                if 'RegularizedLoss' in dir_name:
                    model_name = dir_name.split('_RegularizedLoss_')[0]
                    with_regularization = True
                    tau = float(dir_name.split('_tau_')[1].split('_')[0])
                    alpha = float(dir_name.split('_alpha_')[1].split('_')[0])
                    if 'probabilities' in dir_name:
                        probabilities = True
                    else:
                        probabilities = False
                    if 'exact' in dir_name:
                        sigmoid_approx = False
                    else:
                        sigmoid_approx = True
                else:
                    model_name = dir_name
                    with_regularization = False
                    tau, alpha, sigmoid_approx, probabilities = None, None, None, None
                if 'robust' in dir_name:
                    robust_regularization = True
                    beta = float(dir_name.split('_beta_')[1].split('_')[0])
                    gamma = float(dir_name.split('_gamma_')[1].split('_')[0])
                else:
                    robust_regularization = False
                    beta, gamma = None, None

            # for model_name in DATASET_TO_MODEL_NAMES[dataset.split('_')[0].lower()]:
            # taus = DATASET_TO_MODEL_TO_TAUS[dataset.split('_')[0].lower()][model_name]
            # alphas = DATASET_TO_MODEL_TO_ALPHAS[dataset.split('_')[0].lower()][model_name]
            # for (tau_idx, tau), (alpha_idx, alpha) in itertools.product(*[enumerate(taus), enumerate(alphas)]):

                regularization_params = {
                    'tau': tau,
                    'alpha': alpha,
                    'sigmoid_approx': sigmoid_approx,
                    'probabilities': probabilities,
                    'robust_regularization': robust_regularization,
                    'beta': beta,
                    'gamma': gamma,
                    'device': device
                }
                model_to_load = model.DNN(
                    model_name=model_name,
                    num_classes=ds_obj.num_classes(),
                    learning_rate=learning_rate,
                    aggregate_coeff=aggregate_coeff,
                    with_regularization=with_regularization,
                    regularization_params=regularization_params)

                complete_model_name = '{}_{}'.format(model_to_load.model_name, model_to_load.criterion._get_name()) \
                    if not isinstance(model_to_load.criterion, nn.CrossEntropyLoss) else model_to_load.model_name

                print('Attack: {}, Dataset: {}, Model: {}'.format(
                    attack_name, dataset, complete_model_name))

                adv_folder = '../{}/adversarial_images/{}/{}'.format(
                    ds_obj.name, complete_model_name, attack_name)
                adv_image_ids, all_adv_objs = hp.load_adversarial_objects(
                    folder=adv_folder,
                    epoch=epoch,
                    ds_obj=ds_obj,
                    device=device)
                all_images_adversarial = [x.image for x in all_adv_objs]

                print(adv_folder)
                print(
                    len(glob.glob("{}/*_epoch_{}*".format(adv_folder, epoch))))

                if 'cifar' in ds_obj.name.lower():
                    if ds_obj.name.lower() == 'cifar10':
                        sensitive_attrs, sensitive_attrs_names = [], []
                        for cname in ds_obj.classes:
                            sensitive_attrs_names.append(cname)
                            sensitive_attrs.append(np.array([1 if ds_obj.classes[ds_obj.test_labels[int(img_id)]] == cname \
                                                            else 0 for img_id in adv_image_ids]))
                    else:
                        sensitive_attrs = [np.array(
                                                [1 if ds_obj.classes[ds_obj.test_labels[int(img_id)]] == ds_obj.name.split('_')[-1].lower() \
                                                else 0 for img_id in adv_image_ids])]
                        sensitive_attrs_names = [
                            ds_obj.name.lower().split('_')[-1]
                        ]
                else:
                    attr = ds_obj.name.lower().split('_')[-1]
                    sensitive_attrs = [np.array([ds_obj.get_image_protected_class('test', int(img_id), attr=attr) \
                                            for img_id in adv_image_ids])] # sens_attr = 1 means minority
                    sensitive_attrs_names = [
                        'Black' if attr == 'race' else 'Female'
                    ]

                majority_differences, minority_differences = [], []
                for sensitive_attr in sensitive_attrs:
                    minority_difference, majority_difference = image_differences(
                        adv_image_ids, all_images_adversarial, sensitive_attr,
                        ds_obj)
                    majority_differences.append(majority_difference)
                    minority_differences.append(minority_difference)

                for minority_difference, majority_difference, sensitive_attr_name in zip(
                        minority_differences, majority_differences,
                        sensitive_attrs_names):
                    mu_minority, mu_majority = np.mean(
                        minority_difference), np.mean(majority_difference)
                    csv_rows.append([
                        dataset, complete_model_name, sensitive_attr_name,
                        mu_minority, mu_majority
                    ])

        hp.create_dir("pickled_ubs")
        df = pd.DataFrame(
            csv_rows,
            columns=['dataset', 'model', 'minority', 'mu_min', 'mu_maj'])
        df.to_csv('pickled_ubs/{}_cdf_mus_regularized.csv'.format(attack_name),
                  index=False)

        print('Saved to pickled_ubs/{}_cdf_mus_regularized.csv!'.format(
            attack_name))
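The loop above ends by writing one CSV per attack with columns dataset, model, minority, mu_min and mu_maj. A small hedged sketch of reading one of those files back and ranking models by the gap between majority and minority mean adversarial distances:

# Sketch: inspect a CSV saved by the run above; the DeepFool filename follows the
# 'pickled_ubs/{attack}_cdf_mus_regularized.csv' pattern used in the code.
import pandas as pd

df = pd.read_csv('pickled_ubs/DeepFool_cdf_mus_regularized.csv')
df['gap'] = df['mu_maj'] - df['mu_min']  # positive gap: minority samples sit closer to the decision boundary
print(df.sort_values('gap', ascending=False)[['dataset', 'model', 'minority', 'gap']])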
Example #19
def analyse_results(SM_PC,
                    SM_BC,
                    RM_PC,
                    RM_BC,
                    selection,
                    StateM_PC,
                    StateM_BC,
                    seed,
                    multiplier,
                    linear,
                    pklf_name,
                    dir_name,
                    analyse_replay=True,
                    TFR=True,
                    save=True,
                    verbose=True):
    """
    Analyses results from simulations (see `detect_oscillations.py`)
    :param SM_PC, SM_BC, RM_PC, RM_BC: Brian2 spike and rate monitors of PC and BC populations (see `run_simulation()`)
    :param selection: array of selected cells used by PC multi state monitor
    :param seed: random seed used to run the sim - here used only for saving
    :param multiplier: weight matrix multiplier (see `spw_network_wmx_mult.py`)
    :param linear: bool for linear/circular weight matrix (more advanced replay detection is used in linear case)
    :param pklf_name: file name of saved place fields used for replay detection in the linear case
    :param dir_name: subdirectory name used to save replay detection (and optionally TFR) figures in linear case
    :param analyse_replay: bool for analysing replay or not
    :param TFR: bool for calculating time freq. repr. (using wavelet analysis) or not
    :param save: bool for saving results
    :param verbose: bool for printing results or not
    """

    if SM_PC.num_spikes > 0 and SM_BC.num_spikes > 0:  # check if there is any activity

        spike_times_PC, spiking_neurons_PC, rate_PC, ISI_hist_PC, bin_edges_PC = preprocess_monitors(
            SM_PC, RM_PC)
        spike_times_BC, spiking_neurons_BC, rate_BC = preprocess_monitors(
            SM_BC, RM_BC, calc_ISI=False)
        if not linear:
            plot_raster(spike_times_PC,
                        spiking_neurons_PC,
                        rate_PC, [ISI_hist_PC, bin_edges_PC],
                        None,
                        "blue",
                        multiplier_=multiplier)
        subset = plot_zoomed(spike_times_PC,
                             spiking_neurons_PC,
                             rate_PC,
                             "PC_population",
                             "blue",
                             multiplier_=multiplier,
                             StateM=StateM_PC,
                             selection=selection)
        plot_zoomed(spike_times_BC,
                    spiking_neurons_BC,
                    rate_BC,
                    "BC_population",
                    "green",
                    multiplier_=multiplier,
                    PC_pop=False,
                    StateM=StateM_BC)
        plot_detailed(StateM_PC, subset, multiplier_=multiplier)

        if not linear:
            slice_idx = []
            replay_ROI = np.where((150 <= bin_edges_PC)
                                  & (bin_edges_PC <= 850))
            replay = replay_circular(
                ISI_hist_PC[replay_ROI]) if analyse_replay else np.nan
        else:
            slice_idx = slice_high_activity(rate_PC, th=2, min_len=260)
            plot_raster(spike_times_PC,
                        spiking_neurons_PC,
                        rate_PC, [ISI_hist_PC, bin_edges_PC],
                        slice_idx,
                        "blue",
                        multiplier_=multiplier)
            if analyse_replay:
                replay, replay_results = replay_linear(spike_times_PC,
                                                       spiking_neurons_PC,
                                                       slice_idx,
                                                       pklf_name,
                                                       N=30)
            else:
                replay, replay_results = np.nan, {}
            if slice_idx and replay_results != {}:
                create_dir(dir_name)
                for bounds, tmp in replay_results.items():
                    fig_name = os.path.join(
                        dir_name, "%i-%i_replay.png" % (bounds[0], bounds[1]))
                    plot_posterior_trajectory(tmp["X_posterior"],
                                              tmp["fitted_path"], tmp["R"],
                                              fig_name)
            if save:
                save_replay_analysis(replay, replay_results, seed)

        mean_rate_PC, rate_ac_PC, max_ac_PC, t_max_ac_PC, f_PC, Pxx_PC = analyse_rate(
            rate_PC, 1000., slice_idx)
        mean_rate_BC, rate_ac_BC, max_ac_BC, t_max_ac_BC, f_BC, Pxx_BC = analyse_rate(
            rate_BC, 1000., slice_idx)
        plot_PSD(rate_PC,
                 rate_ac_PC,
                 f_PC,
                 Pxx_PC,
                 "PC_population",
                 "blue",
                 multiplier_=multiplier)
        plot_PSD(rate_BC,
                 rate_ac_BC,
                 f_BC,
                 Pxx_BC,
                 "BC_population",
                 "green",
                 multiplier_=multiplier)

        t_LFP, LFP, f_LFP, Pxx_LFP = analyse_estimated_LFP(
            StateM_PC, selection, slice_idx)
        plot_LFP(t_LFP, LFP, f_LFP, Pxx_LFP, multiplier_=multiplier)

        if save:
            save_LFP(t_LFP, LFP, seed)
            save_PSD(f_PC, Pxx_PC, f_BC, Pxx_BC, f_LFP, Pxx_LFP, seed)

        if TFR:
            coefs_PC, freqs_PC = calc_TFR(rate_PC, 1000., slice_idx)
            coefs_BC, freqs_BC = calc_TFR(rate_BC, 1000., slice_idx)
            coefs_LFP, freqs_LFP = calc_TFR(LFP[::10].copy(), 1000., slice_idx)
            if not linear or not slice_idx:
                plot_TFR(
                    coefs_PC, freqs_PC, "PC_population",
                    os.path.join(base_path, "figures",
                                 "%.2f_PC_population_wt.png" % multiplier))
                plot_TFR(
                    coefs_BC, freqs_BC, "BC_population",
                    os.path.join(base_path, "figures",
                                 "%.2f_BC_population_wt.png" % multiplier))
                plot_TFR(
                    coefs_LFP, freqs_LFP, "LFP",
                    os.path.join(base_path, "figures",
                                 "%.2f_LFP_wt.png" % multiplier))
            else:
                for i, bounds in enumerate(slice_idx):
                    fig_name = os.path.join(
                        dir_name,
                        "%i-%i_PC_population_wt.png" % (bounds[0], bounds[1]))
                    plot_TFR(coefs_PC[i], freqs_PC, "PC_population", fig_name)
                    fig_name = os.path.join(
                        dir_name,
                        "%i-%i_BC_population_wt.png" % (bounds[0], bounds[1]))
                    plot_TFR(coefs_BC[i], freqs_BC, "BC_population", fig_name)
                    fig_name = os.path.join(
                        dir_name, "%i-%i_LFP_wt.png" % (bounds[0], bounds[1]))
                    plot_TFR(coefs_LFP[i], freqs_LFP, "LFP", fig_name)

            if save:
                save_TFR(freqs_PC, coefs_PC, freqs_BC, coefs_BC, freqs_LFP,
                         coefs_LFP, seed)

        max_ac_ripple_PC, t_max_ac_ripple_PC = ripple_AC(rate_ac_PC, slice_idx)
        max_ac_ripple_BC, t_max_ac_ripple_BC = ripple_AC(rate_ac_BC, slice_idx)
        avg_ripple_freq_PC, ripple_power_PC = ripple(f_PC, Pxx_PC, slice_idx)
        avg_ripple_freq_BC, ripple_power_BC = ripple(f_BC, Pxx_BC, slice_idx)
        avg_ripple_freq_LFP, ripple_power_LFP = ripple(f_LFP, Pxx_LFP,
                                                       slice_idx)
        avg_gamma_freq_PC, gamma_power_PC = gamma(f_PC, Pxx_PC, slice_idx)
        avg_gamma_freq_BC, gamma_power_BC = gamma(f_BC, Pxx_BC, slice_idx)
        avg_gamma_freq_LFP, gamma_power_LFP = gamma(f_LFP, Pxx_LFP, slice_idx)

        if verbose:
            if not np.isnan(replay):
                print("Replay detected!")
            else:
                print("No replay detected...")
            print("Mean excitatory rate: %.3f" % mean_rate_PC)
            print("Mean inhibitory rate: %.3f" % mean_rate_BC)
            print("Average exc. ripple freq: %.3f" % avg_ripple_freq_PC)
            print("Exc. ripple power: %.3f" % ripple_power_PC)
            print("Average inh. ripple freq: %.3f" % avg_ripple_freq_BC)
            print("Inh. ripple power: %.3f" % ripple_power_BC)
            print("Average LFP ripple freq: %.3f" % avg_ripple_freq_LFP)
            print("LFP ripple power: %.3f" % ripple_power_LFP)

        return [
            multiplier, replay, mean_rate_PC, mean_rate_BC, avg_ripple_freq_PC,
            ripple_power_PC, avg_ripple_freq_BC, ripple_power_BC,
            avg_ripple_freq_LFP, ripple_power_LFP, avg_gamma_freq_PC,
            gamma_power_PC, avg_gamma_freq_BC, gamma_power_BC,
            avg_gamma_freq_LFP, gamma_power_LFP, max_ac_PC, max_ac_ripple_PC,
            max_ac_BC, max_ac_ripple_BC
        ]
    else:
        if verbose:
            print("No activity!")
        return [np.nan for _ in range(20)]
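analyse_results returns a fixed 20-element summary (or 20 NaNs when there is no activity), so results from several runs can be stacked into a table. A hedged sketch, with the column names taken from the return statement above and the actual call site elided:

# Sketch only: aggregate analyse_results() outputs over several weight-matrix multipliers.
import pandas as pd

columns = ['multiplier', 'replay', 'mean_rate_PC', 'mean_rate_BC',
           'avg_ripple_freq_PC', 'ripple_power_PC', 'avg_ripple_freq_BC', 'ripple_power_BC',
           'avg_ripple_freq_LFP', 'ripple_power_LFP', 'avg_gamma_freq_PC', 'gamma_power_PC',
           'avg_gamma_freq_BC', 'gamma_power_BC', 'avg_gamma_freq_LFP', 'gamma_power_LFP',
           'max_ac_PC', 'max_ac_ripple_PC', 'max_ac_BC', 'max_ac_ripple_BC']

rows = []  # append the list returned by analyse_results(...) for each multiplier here
summary = pd.DataFrame(rows, columns=columns)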
Example #20
def main(dataset,
         gpu,
         model_name,
         epochs,
         taus,
         alphas,
         with_regularization=False,
         sigmoid_approx=False,
         probabilities=False,
         robust_regularization=False,
         betas=[None],
         gammas=[None]):

    device = torch.device('cuda:{}'.format(gpu))

    attack_names = ['DeepFool', 'CarliniWagner']

    ds_obj, datasets, data_loaders = \
        hp.get_data_loder_objects(dataset, PHASES, **hp.get_loader_kwargs(batch_size))

    for epoch in epochs:
        for (tau_idx, tau), (alpha_idx, alpha), (beta_idx, beta), (gamma_idx, gamma) in \
            itertools.product(*[enumerate(taus), enumerate(alphas), enumerate(betas), enumerate(gammas)]):

            regularization_params = {
                'tau': tau,
                'alpha': alpha,
                'sigmoid_approx': sigmoid_approx,
                'probabilities': probabilities,
                'robust_regularization': robust_regularization,
                'beta': beta,
                'gamma': gamma,
                'device': device
            }
            model_to_load = model.DNN(
                model_name=model_name,
                num_classes=ds_obj.num_classes(),
                learning_rate=learning_rate,
                aggregate_coeff=aggregate_coeff,
                with_regularization=with_regularization,
                regularization_params=regularization_params)

            # filename = '{}_{}_epoch_{}_lr_{}.pth'.format(model_to_load.model_name, model_to_load.criterion._get_name(),
            #     epoch, learning_rate)
            # model_to_load.model_ft.load_state_dict(torch.load('../{}/model_weights/{}'.format(ds_obj.name, filename),
            #                                          map_location=device))
            # model_to_load.model_ft.eval()
            # print ('Loaded weights from: ../{}/model_weights/{}'.format(ds_obj.name, filename))

            complete_model_name = '{}_{}'.format(model_to_load.model_name, model_to_load.criterion._get_name()) \
                if not isinstance(model_to_load.criterion, nn.CrossEntropyLoss) else model_to_load.model_name

            for attack_name in attack_names:

                adv_folder = '../{}/adversarial_images/{}/{}'.format(
                    ds_obj.name, complete_model_name, attack_name)
                adv_image_ids, all_adv_objs = hp.load_adversarial_objects(
                    folder=adv_folder,
                    epoch=epoch,
                    ds_obj=ds_obj,
                    device=device)
                all_images_adversarial = [x.image for x in all_adv_objs]

                print(adv_folder)
                print(
                    len(glob.glob("{}/*_epoch_{}*".format(adv_folder, epoch))))

                if 'cifar' in ds_obj.name.lower():
                    if ds_obj.name.lower() == 'cifar10':
                        sensitive_attrs, sensitive_attrs_names = [], []
                        for cname in ds_obj.classes:
                            sensitive_attrs_names.append(cname)
                            sensitive_attrs.append(np.array([1 if ds_obj.classes[ds_obj.test_labels[int(img_id)]] == cname \
                                                            else 0 for img_id in adv_image_ids]))
                    else:
                        sensitive_attrs = [np.array(
                                                [1 if ds_obj.classes[ds_obj.test_labels[int(img_id)]] == ds_obj.name.split('_')[-1].lower() \
                                                else 0 for img_id in adv_image_ids])]
                        sensitive_attrs_names = [
                            ds_obj.name.lower().split('_')[-1]
                        ]
                else:
                    attr = ds_obj.name.lower().split('_')[-1]
                    sensitive_attrs = [np.array([ds_obj.get_image_protected_class('test', int(img_id), attr=attr) \
                                            for img_id in adv_image_ids])]
                    sensitive_attrs_names = [
                        'Black' if attr == 'race' else 'Female'
                    ]

                majority_differences, minority_differences = [], []
                for sensitive_attr in sensitive_attrs:
                    minority_difference, majority_difference = image_differences(
                        adv_image_ids, all_images_adversarial, sensitive_attr,
                        ds_obj)
                    majority_differences.append(majority_difference)
                    minority_differences.append(minority_difference)

                # print (minority_difference, majority_difference)

                hp.create_dir("plots/{}".format(ds_obj.name))
                hp.create_dir("plots/{}/{}".format(ds_obj.name,
                                                   model_to_load.model_name))
                hp.create_dir("plots/{}/{}/{}".format(ds_obj.name,
                                                      model_to_load.model_name,
                                                      attack_name))

                dir_to_save = "plots/{}/{}/{}".format(ds_obj.name,
                                                      model_to_load.model_name,
                                                      attack_name)

                # taus = np.linspace(0.0, 0.5, 2000)
                taus = np.linspace(0.0, 2.0, 2000)
                # taus = np.linspace(0.0, 2.0, 2000) if 'deepfool' in attack_name.lower() else np.linspace(2.9, 3.1, 2000)

                for minority_difference, majority_difference, sensitive_attr_name in zip(
                        minority_differences, majority_differences,
                        sensitive_attrs_names):
                    frac_greater_than_tau_majority = np.array([
                        np.sum(majority_difference > t) /
                        len(majority_difference) for t in taus
                    ])
                    frac_greater_than_tau_minority = np.array([
                        np.sum(minority_difference > t) /
                        len(minority_difference) for t in taus
                    ])

                    if paper_friendly_plots:
                        set_paper_friendly_plots_params()

                    fig = plt.figure()
                    if not paper_friendly_plots:
                        fig.suptitle(
                            r'fraction $d_\theta > \tau$ for {}'.format(
                                ds_obj.name),
                            fontsize=20)
                    ax = fig.add_subplot(111)
                    ax.plot(taus,
                            frac_greater_than_tau_majority,
                            color='blue',
                            label='Other Classes')
                    ax.plot(taus,
                            frac_greater_than_tau_minority,
                            color='red',
                            label='{}'.format(sensitive_attr_name))
                    ax.set_xlabel('Distance to Adv. Sample' + r' ($\tau$)')
                    ax.set_ylabel(r'$ \widehat{I^\tau_s} $')
                    plt.legend()

                    extension = 'png' if not paper_friendly_plots else 'pdf'
                    filename = '{}_inv_cdf'.format(model_to_load.criterion._get_name()) \
                        if not isinstance(model_to_load.criterion, nn.CrossEntropyLoss) else \
                            'inv_cdf_{}'.format(sensitive_attr_name)
                    plt.savefig('{}/{}.{}'.format(dir_to_save, filename,
                                                  extension),
                                bbox_inches='tight')
                    plt.show()
                    plt.close()
Example #21
 def set_exp_dir(self):
     folder = "%s_%s_%s" % (str(
         time.strftime("%Y-%m-%d_%H-%M")), str(
             self.params['type']).lower(), str(self.params['grid']).lower())
     path_to_dir = os.path.join(os.getcwd(), 'logs', folder)
     return helper.create_dir(path_to_dir)
Example #22
    def calibrate(self):
        """Returns Intrinsic and Extrinsic properties of camera.
		Intrinsic properties of camera: focal length (fx, fy) and center (Cx, Cy)
		
		Extrinsic camera properties: distortion coefficients, rotation vector and translation vector
		"""
        assert os.path.exists(self._source_dir), \
            '"{}" must exist and contain calibration images.'.format(self._source_dir)

        for _, fname in tqdm(enumerate(os.listdir(self._source_dir)), total=self.num, desc='Loading chessboard images'):
            img = cv2.imread(os.path.join(self._source_dir, fname), cv2.IMREAD_GRAYSCALE)
            _, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            self.masks.append(mask)

        for _, img in tqdm(enumerate(self.masks), total=self.num, desc='Detecting chessboard images'):
            ret, corners = cv2.findChessboardCorners(img, self.chessboard_size, cv2.CALIB_CB_ADAPTIVE_THRESH +
                                                     cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)

            # If found, add object point, image points after refining them
            if ret:
                self.objpts.append(self.objpt)
                # refine corner location based on criteria
                self.corners_ = cv2.cornerSubPix(img, corners, (11, 11), (-1, -1), self.criteria)
                self.imgpts.append(self.corners_)
            else:
                raise ChessBoardError()

        print(f'\nCalibrating {self.num} valid chessboard images found in {self._source_dir} ...\n')

        self.dims = img.shape[::-1]
        self.h, self.w = self.dims
        self.img_orig = img.copy()

        # Draw and display the corners
        cv2.drawChessboardCorners(self.img_orig, self.chessboard_size, self.corners_, ret)

        # Calibrate camera
        ret, self.cam_mtx, self.dist_coeff, self.rvecs, self.tvecs = cv2.calibrateCamera(self.objpts, self.imgpts,
                                                                               self.dims, None, None)
        create_dir(self.target_dir)

        # Mean reprojection error
        mean_err = 0

        for i in range(len(self.objpts)):
            r_mtx_, _ = cv2.Rodrigues(self.rvecs[i], jacobian=0)
            imgpts_, _ = cv2.projectPoints(self.objpts[i], self.rvecs[i], self.tvecs[i], self.cam_mtx, self.dist_coeff)
            error = cv2.norm(self.imgpts[i], imgpts_, cv2.NORM_L2) / len(imgpts_)
            mean_err += error
            self.errors.append(error)
            self.rot_mtx.append(r_mtx_)
        self.mean_err = mean_err / len(self.objpts)

        plt.imshow(self.img_orig, cmap='hot')
        plt.axis('off')
        plt.savefig(f'{self.target_dir}/{self.filename}_calibrated.png', dpi=1000)

        pos = np.arange(self.num)
        plt.figure()
        plt.bar(pos, self.errors, width=0.6, color='blue')
        plt.axhline(self.mean_err, color='red', linestyle='--')
        plt.ylabel('Mean error in Pixels')
        plt.xlabel('Image')
        plt.savefig(f'{self.target_dir}/{self.filename}_mean_error.png', dpi=1000)
        if self.show:
            plt.show()

        self.save_calibration_results()
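Once calibrate() has populated cam_mtx and dist_coeff, a typical next step is undistorting new images with OpenCV. A minimal hedged sketch; the `calib` instance name and the image paths are assumptions.

# Sketch: undistort an image using the matrices computed by calibrate() above.
import cv2

img = cv2.imread('sample.png')
h, w = img.shape[:2]
new_mtx, roi = cv2.getOptimalNewCameraMatrix(calib.cam_mtx, calib.dist_coeff, (w, h), 1, (w, h))
undistorted = cv2.undistort(img, calib.cam_mtx, calib.dist_coeff, None, new_mtx)
x, y, rw, rh = roi
cv2.imwrite('sample_undistorted.png', undistorted[y:y + rh, x:x + rw])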