Example #1
    def save_model(self, model_path=None):
        if model_path is None:
            model_path = self.model_path
        utils.check_dir(model_path)
        np.save(os.path.join(model_path, "Weight"), self.weight)
        np.save(os.path.join(model_path, "Bias"), self.bias)
        logging.info('Save model to ' + model_path[:50] + '...')
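Every snippet on this page calls some flavour of check_dir from its own project's utils, and the behaviour is not identical across them: some versions create the missing directory (or the parent directory of a file path), some only report whether it already exists so callers can skip finished work, and one returns a normalised path. A minimal sketch covering the two most common uses, with an assumed signature, is:

import os


def check_dir(path, report_error=False):
    """Hypothetical helper: make sure the directory for `path` exists.

    Returns True if it already existed, False if it had to be created.
    With report_error=True it raises instead of creating, mirroring the
    sanity-check usage in some of the examples below.
    """
    # Treat anything with a file extension as a file path and use its parent.
    dir_path = path if os.path.splitext(path)[1] == '' else os.path.dirname(path)
    if dir_path == '' or os.path.isdir(dir_path):
        return True
    if report_error:
        raise FileNotFoundError('Directory not found: %s' % dir_path)
    os.makedirs(dir_path)
    return False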
Example #2
    def get_daily_data(self, code, expire=60 * 6):
        """
        Fetch a stock's full daily history and cache it locally.
        """
        UT.check_dir(CT.DAILY_DIR)
        file_path = CT.DAILY_DIR + code
        expired = UT.check_file_expired(file_path, expire)

        if expired or not os.path.exists(file_path):
            symbol = self._code_to_symbol(code)
            start_date = CT.START
            end_date = date_time.get_today_str()
            adjust = 'qfq'
            if is_index(code):
                d = ak.stock_zh_index_daily(symbol)
            else:
                d = ak.stock_zh_a_daily(symbol, start_date, end_date, adjust)
            if d is None:
                return d
            d.to_csv(file_path, sep='\t')

        if not os.path.exists(file_path):
            return None
        #d = pd.read_csv(file_path, sep='\t', index_col=0)
        d = pd.read_csv(file_path,
                        sep='\t',
                        skiprows=0,
                        parse_dates=True,
                        header=0,
                        index_col=0)
        return d
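Example #2 decides whether to re-download by calling UT.check_file_expired, which is not shown here. A plausible mtime-based sketch (the name and the minutes unit of `expire` are assumptions read off the call sites) is:

import os
import time


def check_file_expired(file_path, expire_minutes):
    # Hypothetical: True when the cached file is missing or older than
    # `expire_minutes` minutes, i.e. the data should be fetched again.
    if not os.path.exists(file_path):
        return True
    age_seconds = time.time() - os.path.getmtime(file_path)
    return age_seconds > expire_minutes * 60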
Example #3
def controller_plots(model_dir, ds, ds_eval, groundtruth, prediction,
                     communication):
    """

    :param model_dir: directory containing the trained model
    :param ds: name of the dataset
    :param ds_eval: name of the dataset used for evaluation (usually the manual one)
    :param groundtruth: evidence
    :param prediction: output control
    :param communication: whether the network uses communication
    """
    model_img = '%s/images/' % model_dir
    utils.check_dir(model_img)

    # Plot R^2 of the regressor between prediction and ground truth
    title = 'Regression %s vs %s' % (ds_eval, ds)
    file_name = 'regression-%svs%s' % (ds_eval, ds)

    if not communication:
        groundtruth = np.array(groundtruth).flatten()
        prediction = np.array(prediction).flatten()

    x_label = 'groundtruth'
    y_label = 'prediction'
    my_plots.plot_regressor(groundtruth, prediction, x_label, y_label,
                            model_img, title, file_name)
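my_plots.plot_regressor is external to this snippet. A rough matplotlib sketch of what such an R^2 scatter plot usually looks like (the function name and argument order are assumptions taken from the call above) is:

import os

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import r2_score


def plot_regressor(groundtruth, prediction, x_label, y_label,
                   img_dir, title, file_name):
    """Hypothetical sketch: scatter prediction against ground truth and
    report the coefficient of determination in the title."""
    groundtruth = np.asarray(groundtruth).flatten()
    prediction = np.asarray(prediction).flatten()
    r2 = r2_score(groundtruth, prediction)

    plt.figure()
    plt.scatter(groundtruth, prediction, s=2, alpha=0.5)
    lims = [min(groundtruth.min(), prediction.min()),
            max(groundtruth.max(), prediction.max())]
    plt.plot(lims, lims, 'k--', linewidth=1)  # ideal y = x line
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title('%s (R^2 = %.3f)' % (title, r2))
    plt.savefig(os.path.join(img_dir, file_name + '.png'))
    plt.close()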
Example #4
    def get_tick_data(self, code, trade_date, expire=60 * 24 * 365 * 10):
        """
        Fetch one day of tick data for a stock and cache it locally.
        """
        UT.check_dir(CT.TICK_DIR + code)
        file_path = CT.TICK_DIR + code + '/' + trade_date
        symbol = self._code_to_symbol(code)
        trade_date = date_time.date_to_str(date_time.str_to_date(trade_date),
                                           '%Y%m%d')
        expired = UT.check_file_expired(file_path, expire)
        if expired or not os.path.exists(file_path):
            d = ak.stock_zh_a_tick_tx(symbol, trade_date)
            # Skip days with no data
            if d is None or len(d) < 10:
                return None
            d.to_csv(file_path, sep='\t')

        if not os.path.exists(file_path):
            return None

        #d = pd.read_csv(file_path, sep='\t', index_col=1)
        d = pd.read_csv(file_path,
                        sep='\t',
                        skiprows=0,
                        parse_dates=True,
                        header=0,
                        index_col=0)

        # Skip days with no data
        if d is None or len(d) < 10:
            return None
        return d
Example #5
    def get_minute_data(self, code, period='1', adjust="", expire=60 * 6):
        """
        Fetch a stock's minute-level data and cache it locally.
        """
        UT.check_dir(CT.MINUTE_DIR + '/' + period)
        file_path = CT.MINUTE_DIR + '/' + period + '/' + code

        expired = UT.check_file_expired(file_path, expire)
        if expired or not os.path.exists(file_path):
            symbol = self._code_to_symbol(code)
            start_date = CT.START
            end_date = date_time.get_today_str()
            adjust = 'qfq'
            # d = ak.stock_zh_a_minute(symbol, period, adjust)
            d = self.stock_zh_a_minute(symbol, period, adjust)
            if d is None:
                return d
            d.to_csv(file_path, sep='\t')

        if not os.path.exists(file_path):
            return None
        #d = pd.read_csv(file_path, sep='\t', index_col=1)
        d = pd.read_csv(file_path,
                        sep='\t',
                        skiprows=0,
                        parse_dates=True,
                        header=0,
                        index_col=0)
        return d
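All three getters in Examples #2, #4 and #5 lean on self._code_to_symbol to turn a bare stock code into the exchange-prefixed symbol akshare expects. The helper is not shown; a common heuristic (purely an assumption: Shanghai codes start with 5, 6 or 9, Shenzhen otherwise) looks like:

def _code_to_symbol(code):
    """Hypothetical sketch: '600000' -> 'sh600000', '000001' -> 'sz000001'.

    In the snippets above this lives on the class as self._code_to_symbol.
    """
    if code.startswith(('sh', 'sz')):
        return code
    prefix = 'sh' if code.startswith(('5', '6', '9')) else 'sz'
    return prefix + code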
Example #6
def yolo_to_mask_images(path, save_path, pedestrian_ratio=0.4, vehicle_ratio=0.5):
    vehicle_mask, pedestrian_mask = get_mask_filename(path, pedestrian_ratio, vehicle_ratio)
    all_mask = np.array(vehicle_mask + pedestrian_mask)
    all_mask = np.unique(all_mask)
    
    img_save_path = os.path.join(save_path, 'images')
    label_save_path = os.path.join(save_path, 'labels')
    check_dir(img_save_path)
    check_dir(label_save_path)
    
    img_root = os.path.join(path, 'images')
    label_root = os.path.join(path, 'labels')

    for filename in all_mask:
        img_path = os.path.join(img_root, filename)
        label_path = os.path.join(label_root, filename[:-4] + ".txt")
        mask_vehicle = filename in vehicle_mask
        mask_pedestrian = filename in pedestrian_mask
        mask_img, mask_labels = transform_mask(img_path, label_path,
                                               mask_pedestrian=mask_pedestrian,
                                               mask_vehicle=mask_vehicle,
                                               background_color=(114, 114, 114))

        if len(mask_labels) == 0:
            continue
        
        save_name = filename[:-4] + '_mask'
        with open(os.path.join(label_save_path, save_name + ".txt"), "w") as writer:
            for mask_label in mask_labels:
                writer.write(mask_label)

        Image.fromarray(mask_img).save(os.path.join(img_save_path, save_name + '.jpg'))
Example #7
def ivs_to_yolo(path, save_path, sampling_margin=200):
    img_dirs = os.listdir(path)
    img_save_path = os.path.join(save_path, 'images')
    label_save_path = os.path.join(save_path, 'labels')
    check_dir(img_save_path)
    check_dir(label_save_path)

    for img_dir in img_dirs:
        img_root = os.path.join(path, img_dir)
        filenames = os.listdir(img_root)

        for filename in filenames[0::sampling_margin]:
            source = os.path.join(img_root, filename)
            dest = os.path.join(img_save_path, filename)
            shutil.copyfile(source, dest)

            source = source.replace('JPEGImages',
                                    'Annotations').replace('jpg', 'xml')
            bboxes = xml_convert_yolo(source)
            filename = filename.replace('jpg', 'txt')
            dest = os.path.join(label_save_path, filename)
            with open(dest, "w") as writer:
                for bbox in bboxes:
                    msg = msg_format % bbox
                    writer.write(msg)
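Examples #7, #8 and #24 format their label lines through msg_format, which is defined elsewhere in those projects. For YOLO-style labels each line is "class cx cy w h" with the box coordinates normalised to [0, 1], so a plausible definition (an assumption, not taken from the source) is:

# Hypothetical: one YOLO label per line, class id followed by the
# normalised centre x, centre y, width and height of the box.
msg_format = '%d %.6f %.6f %.6f %.6f\n'

print(msg_format % (0, 0.5, 0.5, 0.25, 0.4))  # -> "0 0.500000 0.500000 0.250000 0.400000"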
Example #8
def bdd_to_yolo(path='./bdd100k', save_path='./bdd100k/val', mode='val'):
    labels_path = os.path.join(
        path, 'labels/bdd100k_labels_images_' + mode + '.json')
    with open(labels_path) as f:
        all_labels = json.load(f)

    img_root = os.path.join(path, 'images/100k/' + mode)
    img_save_path = os.path.join(save_path, 'images')
    label_save_path = os.path.join(save_path, 'labels')
    check_dir(img_save_path)
    check_dir(label_save_path)
    rider_names, motor_names, bike_names = get_bdd_fixed_filename(all_labels)
    for labels in all_labels:
        filename = labels['name']
        if filename in rider_names:
            continue

        shutil.copyfile(os.path.join(img_root, filename),
                        os.path.join(img_save_path, filename))

        bboxes = json_convert_yolo(labels)

        dest = os.path.join(label_save_path, filename[:-4] + ".txt")
        with open(dest, "w") as writer:
            for bbox in bboxes:
                msg = msg_format % bbox
                writer.write(msg)
Example #9
def evaluate(args):
    """ Evaluate model on testset
    """
    # get image paths and split dataset
    testset_dir = os.path.join(args.dataset_dir, 'test')
    check_dir(testset_dir, report_error=True)
    _, test_img_paths, _, _ = \
        get_and_split_dataset(testset_dir, test_size=1.0)
    with open(args.label_path, 'r') as fid:
        label2idx = json.loads(fid.read())
    idx2label = defaultdict(str)
    for k, v in label2idx.items():
        idx2label[v] = k
    # create dataset
    testset = Dataset(test_img_paths,
                      label2idx=label2idx,
                      idx2label=idx2label,
                      target_shape=TARGET_SHAPE,
                      batch_size=args.batch_size,
                      shuffle=False)

    # load model
    model = load_model(args.model_path)

    # get prediction
    error_cnt = 0
    for images, labels in testset.generate(epoch_stop=True):
        # predict
        gt_labels = np.argmax(labels, axis=1)
        pred_probs = model.predict(images)
        print(pred_probs.shape)
        pred_labels = np.argmax(pred_probs, axis=1)
        # print results
        for idx, gt in enumerate(gt_labels):
            pred = pred_labels[idx]
            print('gt={}, pred={}'.format(idx2label[gt], idx2label[pred]))
            true_or_false = True
            if gt != pred:
                error_cnt += 1
                true_or_false = False
            if args.save_samples is True:
                img_path = 'output/test_{}_gt_{}-pred_{}.png'.format(
                    true_or_false, idx2label[gt], idx2label[pred])
                img = images[idx, ...] * 255
                img = img.astype(np.uint8)
                cv2.imwrite(img_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
    num_samples = testset.get_dataset_size()
    print('total acc={}%'.format(
        (num_samples - error_cnt) * 100 / float(num_samples)))
Example #10
def main():
    detector = YoloV3Detection(graph_path=args.graph_path)
    utils.check_dir(args.out_dir)
    args.mode = "video"
    args.video_pth = "hiv00418.mp4"
    if args.mode == "video":
        cap = cv2.VideoCapture(args.video_pth)
        total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        vid_w, vid_h = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(
            cv2.CAP_PROP_FRAME_HEIGHT)
        fps = cap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = os.path.join(args.out_dir,
                                    os.path.split(args.video_pth)[-1])
        out_res = cv2.VideoWriter(output_video, fourcc, fps,
                                  (int(vid_w), int(vid_h)))

        count = 0
        while cap.isOpened() and count <= total_frame:
            ret, frame = cap.read()
            if not ret:
                break
            boxes, labels, labels_idx = process(detector, frame)
            print(boxes)
            frame = utils.draw_boxes_with_texts(frame, boxes, labels,
                                                labels_idx)
            out_res.write(frame)
            cv2.imshow("Demo", frame)
            count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        out_res.release()
        cv2.destroyAllWindows()

    elif args.mode == "images":
        list_ims = os.listdir(args.im_dir)
        for im_n in tqdm(list_ims):
            if im_n.split(".")[-1] in ["jpg", "png"]:
                im_pth = os.path.join(args.im_dir, im_n)
                im = cv2.imread(im_pth)
                boxes, labels, labels_idx = process(detector, im)
                im = utils.draw_boxes_with_texts(im, boxes, labels, labels_idx)
                # save
                im_save_pth = os.path.join(args.out_dir, im_n)
                cv2.imwrite(im_save_pth, im)

    else:
        print("[ERRO] Mode is not exits, check mode pls ... ")
Example #11
def parse_args():
    """ Parse arguments from command line
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('-d',
                    '--dataset-dir',
                    type=str,
                    default=DATASET_PATH,
                    help="path to input dataset")
    ap.add_argument('-t',
                    '--test-ratio',
                    type=float,
                    default=0.2,
                    help='ratio of samples in testset over the whole dataset')
    ap.add_argument('-m',
                    '--model-path',
                    type=str,
                    default=MODEL_SAVE_PATH,
                    help="path to output model")
    ap.add_argument('-l',
                    '--label_path',
                    type=str,
                    default=LABELBIN_SAVE,
                    help="path to output label binarizer")
    ap.add_argument('-p',
                    '--plot-path',
                    type=str,
                    default=LOSS_PLOT_PATH,
                    help="path to output accuracy/loss plot")
    ap.add_argument('-b',
                    '--batch-size',
                    type=int,
                    default=128,
                    help='batch size')
    ap.add_argument('-e', '--epochs', type=int, default=25, help='number of epochs')
    ap.add_argument('-i', '--init-lr', type=float, default=1e-3)
    ap.add_argument('--phase',
                    type=str,
                    default='train',
                    choices=['train', 'evaluate'],
                    help='specify operations, train or evaluate')
    ap.add_argument('--save-samples',
                    type=lambda x: str(x).lower() == 'true',
                    default=True,
                    help='flag to indicate whether to save samples when evaluating')
    args = ap.parse_args()
    # check args
    check_dir(args.dataset_dir, report_error=True)
    check_dir(os.path.dirname(args.model_path))
    check_dir(os.path.dirname(args.label_path))
    check_dir(os.path.dirname(args.plot_path))
    return args
Example #12
def run_iterations(args, augmentator, augmentator_name, tmp_output_directory,
                   iterations, datasets_dict, classifier_name, epochs, start):
    print('\t\twithout augmentation: ', augmentator_name)

    for dataset_name in dataset_names_for_archive[ARCHIVE_NAMES[0]]:

        print('\t\t\tdataset_name: ', dataset_name)

        upper_dir = tmp_output_directory + augmentator_name + '/' + dataset_name

        done = check_dir(upper_dir)

        if not done:
            #save all the predictions and the corresponding true class
            predicted_y = []
            expected_y = []

            for iter in range(iterations):
                print('\t\t\t\titer', iter)
                trr = '_itr_' + str(iter)

                output_directory = upper_dir + '/' + trr + '/'
                #print(output_directory)

                create_directory(output_directory)

                y_pred, y_true = fit_classifier_aug(args, augmentator,
                                                    datasets_dict,
                                                    dataset_name,
                                                    classifier_name, epochs,
                                                    output_directory)

                print('\t\t\t\tDONE')

                # the creation of this directory means this iteration is done
                create_directory(output_directory + '/DONE')

                if y_pred.shape == y_true.shape:
                    predicted_y.extend(y_pred)
                    expected_y.extend(y_true)
                else:
                    raise Exception("Shape mismatch: y_pred.shape != y_true.shape.")

            totalduration = time.time() - start
            df_metrics = calculate_metrics(expected_y, predicted_y,
                                           totalduration)
            df_metrics.to_csv(upper_dir + '/avg_metrics.csv', index=False)
            create_directory(upper_dir + '/DONE')

            print('iterations DONE!')
            print(df_metrics)
Example #13
def parse_args():
    """ Parse arguments from command line
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--dataset-dir', type=str, default=DATASET_PATH,
                    help="path to input dataset")
    ap.add_argument('-m', '--model-path', type=str, default=MODEL_SAVE_PATH,
                    help="path to output model")              
    ap.add_argument('-l', '--label_path', type=str, default=LABELBIN_SAVE,
                    help="path to output label binarizer")
    ap.add_argument('-p', '--plot-path', type=str, default=LOSS_PLOT_PATH,
                    help="path to output accuracy/loss plot")
    ap.add_argument('--trainlog-path', type=str, default=TRAIN_LOG_PATH,
                    help='path to training log')
    ap.add_argument('-b', '--batch-size', type=int, default=64, 
                    help='batch size')
    ap.add_argument('-e', '--epochs', type=int, default=20, 
                    help='number of epochs')
    ap.add_argument('-i', '--init-lr', type=float, default=1e-5)
    ap.add_argument('--phase', type=str, default='train', 
                    choices=['train', 'evaluate'], 
                    help='specify operations, train or evaluate')
    ap.add_argument('--goon-train', type=lambda x: str(x).lower() == 'true',
                    default=False, help='load an old model and continue training')
    ap.add_argument('--batch-normalize', type=lambda x: str(x).lower() == 'true',
                    default=True,
                    help='add batch normalization layers after activations')
    ap.add_argument('--dropout', type=float, default=0.25, 
                    help='dropout probability on training')
    args = ap.parse_args()
    # check args
    check_dir(args.dataset_dir, report_error=True)
    check_dir(os.path.dirname(args.model_path))
    check_dir(os.path.dirname(args.label_path))
    check_dir(os.path.dirname(args.plot_path))
    check_dir(os.path.dirname(args.trainlog_path))
    return args
Example #14
def run_cv(args, augmentator, augmentator_name, tmp_output_directory,
           datasets_dict, classifier_name, epochs, start, cv):
    print('\t\taugmentator_name: ', augmentator_name)

    for dataset_name in dataset_names_for_archive[ARCHIVE_NAMES[0]]:

        print('\t\t\tdataset_name: ', dataset_name)

        output_directory = tmp_output_directory + augmentator_name + '/' + dataset_name

        done = check_dir(output_directory)

        if not done:

            create_directory(output_directory)

            cv_fit_classifier_aug(args, augmentator, datasets_dict,
                                  dataset_name, classifier_name, epochs,
                                  output_directory, cv)

            create_directory(output_directory + '/DONE')
Example #15
def main():

  #############################################################################
  # 0.
  #

  # Check if tmp folder exists, otherwise create it
  if check_dir(settings.tmp_dir):
    sys.exit(0)
  else:
    os.makedirs(settings.tmp_dir)

  # Run some checks on the source folder with core data.
  if not get_years():
    # Is there anything in the source folder to begin with?
    print "We were not able to find a XLSX file with core data in the folder: "\
          "%s. Make sure this folder contains at least one XLSX file named "\
          "after the year (eg. 2014.xlsx). Check the readme for more info "\
          "about the required structure of these files.\n"\
          "Quiting..." % (settings.src_core)
    sys.exit(0)

  # Provide feedback that the script only processes XLSX files with properly
  # formatted filenames. (eg. 2014.xlsx)
  fn_pattern = re.compile('^20[0-9]{2}$')
  for f in os.listdir(settings.src_core):
    fn = os.path.splitext(f)[0]
    ext = os.path.splitext(f)[-1].lower()
    path = os.path.join(settings.src_core, fn)
    
    if not os.path.isdir(path):
      # Only check files
      if ext == ".xlsx":
        if not fn_pattern.match(fn):
          print "The XLSX file %s doesn't have a properly formatted year as "\
                "filename and will be ignored." % (f)
      else:
        print "The script only processes XLSX files. %s will be ignored." % (f)


  print "Loading the core and meta data..."

  # Build the different sets of admin areas with things we have to loop over.
  countries = build_set('country','type','iso',settings.src_meta_aa)
  states = build_set('state','type','iso',settings.src_meta_aa)
  admin_areas = countries | states
  
  # Build sets for the variables we loop over
  global index_param
  index_param = build_set('param','type','id',settings.src_meta_index)
  index_score = build_set('score','type','id',settings.src_meta_index)
  sp = list(index_score | index_param)

  # Build set for the years we're interested in
  global years
  years = get_years()
  global current_yr
  current_yr = max(years)


  # Read in the files with meta-data and set the scope to global
  global df_meta_aa
  df_meta_aa = pd.read_csv(settings.src_meta_aa,index_col='iso')
  global df_meta_index
  df_meta_index = pd.read_csv(settings.src_meta_index,index_col='id')


  #############################################################################
  # 1. Store the relevant core data in one DF (df_full)
  #
  #
  # Output: df_full
  #
  #             2014            2015
  # iso   ind   value   data    value   data
  # AR    0     1.2420  NaN     1.2235  NaN
  #       1.01  0.1802  78.17   0.1795  75.16
  # ...


  first_yr = True

  for yr in years:
    # All core data files are named after the year of the edition
    fn = settings.src_core + yr + '.xlsx'

    df_yr = pd.DataFrame()
    for sheet in settings.core_data_sheets:
      
      # Build an index to parse only the relevant columns
      cols_index = build_col_index(fn,sheet)

      # Read Excel (parsing only relevant cols)
      df_sheet = pd.read_excel(fn,sheet,parse_cols=cols_index)

      # Ensure that the iso codes don't contain strange characters. They can only
      # contain letters, numbers and hyphens. (eg. CN, CN-65 or IN-MP)
      df_sheet['iso'].replace(to_replace='[^a-zA-Z0-9-]', value='',inplace=True,regex=True) 

      # Append each sheet to a dataframe holding the data for that year
      df_yr = df_yr.append(df_sheet)

    # Set the index of the DF to the ISO code and ID of the indicator
    df_yr.set_index(['iso','id'],inplace=True)
    # Make sure the index is sorted so the slicing works well
    df_yr.sortlevel(inplace=True)

    # Rename the column 'score' to value
    df_yr.rename(columns={'score':'value'}, inplace=True)

    
    # Add an extra level in the hierarchy of the columns (MultiIndex)
    # containing an indication of the year

    # Create a list that repeats the year once for each column
    c = [yr] * len(df_yr.columns)
    # Add a level to the cols
    df_yr.columns = [c, df_yr.columns]

    if first_yr:
      # If it's the first year, we initialize the full DataFrame
      df_full = df_yr
      first_yr = False
    else:
      # Every subsequent year will have to be merged into df_full
      df_full = pd.merge(df_full,df_yr,how='outer',left_index=True,right_index=True)

  df_full.sortlevel(axis=1,inplace=True)

  #############################################################################
  # 2. CSV downloads
  #
  # For all the CSV exports, prepare a dataframe that combines the data with
  # the meta.

  print "Building the CSV files for the download section..."

  # For the CSV, we're only interested in the value column of each year
  df_full_csv = df_full.loc[:,(slice(None),'value')]
  df_full_csv.columns = df_full_csv.columns.get_level_values(0)

  # The full DF is a multi-index. Since the meta-files have a single index,
  # it is necessary to reset the indexes before joining on the column.
  df_full_csv = df_full_csv.reset_index()
  df_meta_aa_csv = df_meta_aa.reset_index()
  df_meta_index_csv = df_meta_index.reset_index()

  # Merge the country meta
  df_full_csv = pd.merge(df_full_csv,df_meta_aa_csv,on='iso')

  # Merge the index meta data
  df_full_csv = pd.merge(df_full_csv,df_meta_index_csv,on='id',suffixes=('_aa','_var'))

  # Re-index the DF on iso & id  and make sure it's sorted
  df_full_csv.set_index(['iso','id'],inplace=True)
  df_full_csv.sortlevel(inplace=True)

  # 2.0 Export the full dataset to CSV

  for lang in settings.langs:
    # Build a list with the meta-data that needs to be included
    columns = ['name:' + lang + '_aa','name:' + lang + '_var','type_var']
    columns = columns + list(years)

    file_path = (settings.exp_full_csv).format(lang=lang)
    df_full_csv.loc[slice(None),columns].to_csv(file_path,encoding='UTF-8',index=False)
  

  # 2.1 Generate the main CSV files

  # Slice the DF to only contain the score and parameters for the current year.
  df_main_csv = df_full_csv.loc[(slice(None),sp),:]

  for lang in settings.langs:
    # Pivot the DF and export it
    file_path = (settings.exp_current_csv).format(lang=lang, yr=current_yr)
    pivot_df(df_main_csv,'name:' + lang + '_aa','name:' + lang + '_var',current_yr).to_csv(file_path,encoding='UTF-8')


  # 2.3 Generate the country + state CSV files
  for aa in admin_areas:
    # Select the data of this admin area
    df_aa_csv = df_full_csv.loc[(aa,slice(None)),:]
    for lang in settings.langs:
      # Include the name of the var, its type and the years
      columns = ['name:' + lang + '_var','type_var'] + list(years)

      # Select the proper columns and generate the CSV
      file_path = (settings.exp_aa_csv).format(lang = lang, aa = aa.lower())
      df_aa_csv.loc[slice(None),columns].to_csv(file_path,encoding='UTF-8',index=False)


  #############################################################################
  # 3. Calculate the rankings
  #
  #
  # Output: df_full
  #
  #             2014                    2015
  #             value   data  gr  sr    value  data  gr  sr
  # iso   id
  # AR    0     1.2420  NaN   13  NaN   1.2235 NaN   12  NaN
  #       1.01  0.1802  73.1  5   NaN   0.1795 75.8  6   NaN
  # ...


  print "Calculating the ranking..."

  # 3.0 Prepare the structure
  # Add placeholder cols with NaN that can be updated later with df.update()
  for year in years:
    for rank in ('gr', 'sr'):
      df_full[(year,rank)] = np.nan
  # Make sure its sorted
  df_full.sortlevel(axis=1,inplace=True)

 
  # 3.1 Global rank
  # The global rank (gr) is a rank of all the COUNTRIES in the project
  df_full = get_rank(countries,df_full,'gr')


  # 3.3 State rank
  # The state rank ('sr') ranks the STATES of a particular country
  for country in countries:
    # Check if there are any states or provinces for this country
    cs = build_set(country,'country','iso',settings.src_meta_aa)
    if cs:
      df_full = get_rank(cs,df_full,'sr')


  #############################################################################
  # 4. JSON api
  #

  print "Building the JSON files for the API..."

  # 4.1 Generate the main JSON file
  for lang in settings.langs:
    # The JSON will contain a list with dicts
    json_data = []
    
    # Loop over the countries list
    for country in countries:
      country_data = build_json_aa(country,df_full,lang, historic=True)
      # Sort the list of states / provinces
      if country_data['states']:
        country_data['states'] = sorted(country_data['states'], key=lambda k: k['name'])
      json_data.append(country_data)

    # Sort the list of countries by name
    sorted_data = sorted(json_data, key=lambda k: k['name'])

    # Write the list to a JSON file
    file_path = (settings.exp_core).format(lang=lang)
    write_json(file_path, sorted_data)


  # 4.3 Generate the country + state JSON files
  for aa in admin_areas:
    for lang in settings.langs:
      # Get the data for this admin area in a dict
      json_data = build_json_aa(aa,df_full,lang,indicators=True,historic=True)

      # Write the dict to a JSON file
      file_path = (settings.exp_aa).format(lang=lang,aa=aa.lower())
      write_json(file_path, json_data)


  # Fully remove the temp directory
  clean_dir(settings.tmp_dir , True)

  print "All done. The data has been prepared for use on global-climatescope.org."
Example #16
    parser.add_argument('--inner_lr', type=float, default=1e-2)
    parser.add_argument('--inner_opt', type=str, default='SGD')
    parser.add_argument('--outer_lr', type=float, default=1e-3)
    parser.add_argument('--outer_opt', type=str, default='Adam')
    parser.add_argument('--lr_sched', type=lambda x: (str(x).lower() == 'true'), default=False)
    
    # imaml specific settings
    parser.add_argument('--lambda', type=float, default=2.0)
    parser.add_argument('--version', type=str, default='GD')
    parser.add_argument('--cg_steps', type=int, default=5) 
    
    # network settings
    parser.add_argument('--net', type=str, default='ConvNet')
    parser.add_argument('--n_conv', type=int, default=4)
    parser.add_argument('--n_dense', type=int, default=0)
    parser.add_argument('--hidden_dim', type=int, default=64)
    parser.add_argument('--in_channels', type=int, default=1)
    parser.add_argument('--hidden_channels', type=int, default=64,
        help='Number of channels for each convolutional layer (default: 64).')

    args = parser.parse_args()

    return args

if __name__ == '__main__':
    args = parse_args()
    set_seed(args.seed)
    set_gpu(args.device)
    check_dir(args)
    main(args)
Example #17
    plt.xlabel('time (seconds)', fontsize=11)
    plt.ylabel('x position', fontsize=11)

    plt.plot(time,
             positions,
             label='Kp = %s, Ki = %s, Kd = %s' %
             (round(P, 2), round(I, 2), round(D, 2)))
    plt.plot(time, [myt.goal_position[0]] * len(positions),
             label='Setpoint',
             ls='--',
             color='black')
    plt.title('Step response')
    plt.ylim(top=myt.goal_position[0] + 10)
    plt.xlim(0, 7)

    plt.legend()
    my_plots.save_visualisation(
        'Step-responsep=kp%ski%skd%s' %
        (round(P, 2), round(I, 2), round(D, 2)), directory)


if __name__ == '__main__':
    dir_ = os.path.join('controllers', 'images')
    utils.check_dir(dir_)

    Prop = 100
    Int = 0
    Der = 0

    main(dir_, Prop, Int, Der)
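Example #17 plots the step response of a PID position controller for a Thymio; the controller itself is defined elsewhere. For reference, a textbook discrete PID update of the kind such a controller would run (a sketch, not the project's implementation, with an assumed sampling period dt) is:

class PID:
    """Minimal textbook PID sketch, not the Thymio controller used above."""

    def __init__(self, kp, ki, kd, dt=0.1):
        self.kp, self.ki, self.kd, self.dt = kp, ki, kd, dt
        self.integral = 0.0
        self.prev_error = None

    def step(self, setpoint, measurement):
        error = setpoint - measurement
        self.integral += error * self.dt
        derivative = 0.0 if self.prev_error is None else (error - self.prev_error) / self.dt
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative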
Example #18
    def save_model(self, model_path=None):
        if model_path is None:
            model_path = self.model_path
        utils.check_dir(os.path.join(model_path, "LR.pkl"))
        joblib.dump(self.model, os.path.join(model_path, "LR.pkl"))
        logging.info('Save model to ' + model_path[:50] + '...')
Example #19
    for data in data_array:
        this_date = data.get_date()
        if this_date < check:
            check = this_date
    # Add a relative time field to the objects
    for data in data_array:
        data.set_relative_time(data.get_date() - check)
    # Sort by date for nice plotting
    data_array.sort(key=lambda x: x.get_concentration())
    data_array.sort(key=lambda x: x.get_date())

    #######################
    # Make output file and put the raw
    # histo's in it.
    #
    out_dir = utils.check_dir(args.out_dir)
    outFile = ROOT.TFile("{0}stabaliser_plots.root".format(out_dir),
                         "recreate")

    for dKey in sorted(dates):
        for sKey in samples:
            # Make title strings and root objects for this loop
            hist = False
            date_str = dKey.strftime("%d/%m/%Y")
            base_name = "{0}_{1}".format(date_str, sKey)
            stack_name = "THStack_{0}".format(base_name)
            stack_title = "{0}% Te, {1}g/l PPO, {2}, {3}, {4}".format(
                args.te, args.ppo, args.stabaliser, date_str, sKey)
            canvas_name = "TCanvas_{0}".format(base_name)
            stack = ROOT.THStack(stack_name, stack_title)
            legend = ROOT.TLegend(0.65, 0.65, 0.9, 0.9)
Example #20
def main(distances, front_prox_values, back_prox_values, front_prox_comms,
         back_prox_comms, myt_quantity, min_distance):
    """

    :param distances: array containing the distances among the agents for each experiment
    :param front_prox_values: array containing the value of the frontal sensor using prox_values
    :param back_prox_values: array containing the value corresponding to the average of the rear sensor readings using prox_values
    :param front_prox_comms: array containing the value of the frontal sensor using prox_comm
    :param back_prox_comms: array containing the value corresponding to the average of the rear sensor readings using prox_comm
    :param myt_quantity: number of agents
    :param min_distance: length of the agents
    """

    for idx, distance in enumerate(distances):

        initial_positions = np.array(
            [0, min_distance + distance, (min_distance + distance) * 2],
            dtype=np.float64)

        world = pyenki.World()

        myts = []
        for i in range(myt_quantity):
            myt = Thymio(name='myt%d' % (i + 1),
                         index=i,
                         use_aseba_units=False)

            myt.position = (initial_positions[i], 0)
            myt.initial_position = myt.position

            # Reset the parameters
            myt.angle = 0

            myt.prox_comm_tx = 0
            myt.prox_comm_enable = True

            myts.append(myt)
            world.add_object(myt)

        print('distance = %d' % distance)
        # for m in myts:
        #     print(m.name, round(m.initial_position[0], 1), round(m.position[0], 1))

        sensors = dict()

        a = []
        b = []
        c = []
        d = []
        for _ in range(5):
            world.step(dt=0.1)

            sensing = myts[1].get_input_sensing()
            front_prox_value = sensing[2]
            back_prox_value = np.mean([sensing[5], sensing[6]])

            front_prox_comm = sensing[9]
            back_prox_comm = np.mean([sensing[12], sensing[13]])

            a.append(int(front_prox_value))
            b.append(int(back_prox_value))
            c.append(int(front_prox_comm))
            d.append(int(back_prox_comm))

        front_prox_values[idx] = int(np.mean(a))
        back_prox_values[idx] = int(np.mean(b))
        front_prox_comms[idx] = int(np.mean(c))
        back_prox_comms[idx] = int(np.mean(d))

        sensors['front_prox_values'] = int(np.mean(a))
        sensors['back_prox_values'] = int(np.mean(b))
        sensors['front_prox_comm'] = int(np.mean(c))
        sensors['back_prox_comm'] = int(np.mean(d))

        print(sensors)

    sensing_to_distances = [
        distances, front_prox_values, back_prox_values, front_prox_comms,
        back_prox_comms
    ]

    file = os.path.join('controllers', 'sensing_to_distances.pkl')
    with open(file, 'wb') as f:
        pickle.dump(sensing_to_distances, f)

    plt.figure()
    plt.xlabel('distance', fontsize=11)
    plt.ylabel('sensing', fontsize=11)

    plt.plot(distances, front_prox_values, label='front_prox_values')
    plt.plot(distances, back_prox_values, label='back_prox_values')
    plt.plot(distances, front_prox_comms, label='front_prox_comm')
    plt.plot(distances, back_prox_comms, label='back_prox_comm')
    plt.legend()

    dir = os.path.join('controllers', 'images')
    utils.check_dir(dir)

    my_plots.save_visualisation('sensing_to_distances', dir)
Example #21
    def save_model(self, model_path=None):
        if model_path is None:
            model_path = self.model_path
        utils.check_dir(model_path)
        torch.save(self.state_dict(), model_path)
Example #22
    def save_model(self, model_path=None) -> NoReturn:
        if model_path is None:
            model_path = self.model_path
        utils.check_dir(model_path)
        torch.save(self.state_dict(), model_path)
        logging.info('Save model to ' + model_path[:50] + '...')
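Examples #21 and #22 only show the save side. The matching load step for these torch models is symmetric; a minimal sketch (the method name and map_location choice are assumptions, and it assumes the same torch/logging imports as the snippets above) would be:

    def load_model(self, model_path=None):
        """Hypothetical counterpart to save_model above."""
        if model_path is None:
            model_path = self.model_path
        self.load_state_dict(torch.load(model_path, map_location='cpu'))
        logging.info('Load model from ' + model_path[:50] + '...')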
Example #23
def train(args):
    """ Train model on trainset and validate on valset
    """
    # get image paths and split dataset
    trainval_dir = os.path.join(args.dataset_dir, 'train_val')
    check_dir(trainval_dir, report_error=True)
    train_img_paths, test_img_paths, label2idx, idx2label = \
        get_and_split_dataset(trainval_dir, test_size=args.test_ratio)
    with open(args.label_path, 'w') as wfid:
        wfid.write(json.dumps(label2idx))

    # create augmentation operations
    aug = ImageDataGenerator(rotation_range=25,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    # create generators of trainset and testset
    trainset = Dataset(image_paths=train_img_paths,
                       label2idx=label2idx,
                       idx2label=idx2label,
                       target_shape=TARGET_SHAPE,
                       batch_size=args.batch_size,
                       shuffle=True)
    valset = Dataset(image_paths=test_img_paths,
                     label2idx=label2idx,
                     idx2label=idx2label,
                     target_shape=TARGET_SHAPE,
                     batch_size=args.batch_size,
                     shuffle=False)
    # build model
    model = SmallerVGGNet.build(width=TARGET_SHAPE[1],
                                height=TARGET_SHAPE[0],
                                depth=TARGET_SHAPE[2],
                                classes=len(label2idx))
    opt = Adam(lr=args.init_lr, decay=args.init_lr / args.epochs)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    model.summary()

    # create callback
    callbacks = [
        ModelCheckpoint(args.model_path,
                        monitor='val_loss',
                        save_best_only=True,
                        verbose=1),
        # EarlyStopping(patience=50),
        ReduceLROnPlateau(patience=10),
        CSVLogger("training.log")
    ]

    # train model
    H = model.fit_generator(
        trainset.generate(),
        steps_per_epoch=trainset.get_dataset_size() // args.batch_size,
        validation_data=valset.generate(),
        validation_steps=valset.get_dataset_size() // args.batch_size,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks)

    # plot curve
    plt.style.use("ggplot")
    plt.figure()
    N = args.epochs
    plt.plot(np.arange(0, N), H.history['loss'], label='train_loss')
    plt.plot(np.arange(0, N), H.history['val_loss'], label='val_loss')
    plt.plot(np.arange(0, N), H.history['acc'], label='train_acc')
    plt.plot(np.arange(0, N), H.history['val_acc'], label='val_acc')
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig(args.plot_path)
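Example #23 targets an older Keras: fit_generator and the 'acc'/'val_acc' history keys are gone in current tf.keras, and Adam now takes learning_rate. If you port it, the training call becomes roughly the following (a sketch that reuses the args, model, trainset, valset and callbacks names from the snippet above and assumes TF 2.x, not a verified patch):

# Modern tf.keras equivalents for the calls used above (assumption: TF 2.x).
from tensorflow.keras.optimizers import Adam

opt = Adam(learning_rate=args.init_lr)
H = model.fit(trainset.generate(),
              steps_per_epoch=trainset.get_dataset_size() // args.batch_size,
              validation_data=valset.generate(),
              validation_steps=valset.get_dataset_size() // args.batch_size,
              epochs=args.epochs,
              callbacks=callbacks)
# History keys are now 'accuracy' / 'val_accuracy' instead of 'acc' / 'val_acc'.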
Example #24
def fixed_bdd_to_yolo(path='./bdd100k', save_path='./bdd100k/val', mode='val',
                      x_scale=1. / 1280., y_scale=1. / 720.):
    
    img_root = os.path.join(path, 'images/100k/' + mode)
    img_save_path = os.path.join(save_path, 'images')
    label_save_path = os.path.join(save_path, 'labels')
    check_dir(img_save_path)
    check_dir(label_save_path)
    
    labels_path = os.path.join(path, 'labels/bdd100k_labels_images_' + mode + '.json') 
    with open(labels_path) as f:
        all_labels = json.load(f)
    
    names = []
    rider_labels = []
    
    ######
    for labels in all_labels:
        filename = labels['name']
        for label in labels['labels']:
            if label['category'] == "rider":
                names.append(filename)
                rider_labels.append(labels)
                break
                
                
    #####
    index = 0
    while index < len(rider_labels):
        filename = names[index]
        labels = rider_labels[index]
        
        bbox_labels = {"rider" : [], "bike" : [], "motor":[], "normal": []}
        for label in labels['labels']:
            category = label['category']
            if category in trans_dict.keys() or category == 'rider':
                xmin = int(label['box2d']['x1'])
                ymin = int(label['box2d']['y1'])
                xmax = int(label['box2d']['x2'])
                ymax = int(label['box2d']['y2'])

                if category in bbox_labels.keys():
                    if category == 'rider':
                        bbox_labels[category].append([xmin,ymin,xmax,ymax + 20])
                    else:
                        bbox_labels[category].append([xmin,ymin,xmax,ymax])
                else:
                    bbox_labels['normal'].append([trans_dict[category], xmin, ymin, xmax, ymax])
                    
        datas = rider_iou_pair(bbox_labels)

        if datas is None:
            rider_labels.pop(index)
            names.pop(index)
            continue
            
        dest = os.path.join(label_save_path, filename[:-4] + ".txt")
        with open(dest, "w") as writer:
            for bbox in datas + bbox_labels['normal']:
                bbox[1], bbox[2], bbox[3], bbox[4] = xy2cxcywh(bbox[1], bbox[2], bbox[3], bbox[4],
                                                               x_scale, y_scale)
                bbox = tuple(bbox)
                msg = msg_format % bbox
                writer.write(msg)
        shutil.copyfile(os.path.join(img_root, filename), os.path.join(img_save_path, filename))
            
        index += 1
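fixed_bdd_to_yolo converts corner boxes through xy2cxcywh before formatting them with msg_format; that helper is not shown. The usual corner-to-normalised-centre conversion, with a signature assumed from the call site above, is:

def xy2cxcywh(xmin, ymin, xmax, ymax, x_scale, y_scale):
    """Hypothetical sketch: pixel corners -> normalised YOLO (cx, cy, w, h)."""
    cx = (xmin + xmax) / 2.0 * x_scale
    cy = (ymin + ymax) / 2.0 * y_scale
    w = (xmax - xmin) * x_scale
    h = (ymax - ymin) * y_scale
    return cx, cy, w, h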
Example #25
    init_parser.add_argument('--model_name', type=str, default='BPR', help='Choose a model to run.')
    init_args, init_extras = init_parser.parse_known_args()
    model_name = eval('{0}.{0}'.format(init_args.model_name))
    reader_name = eval('{0}.{0}'.format(model_name.reader))
    runner_name = eval('{0}.{0}'.format(model_name.runner))

    # Args
    parser = argparse.ArgumentParser(description='')
    parser = parse_global_args(parser)
    parser = reader_name.parse_data_args(parser)
    parser = runner_name.parse_runner_args(parser)
    parser = model_name.parse_model_args(parser)
    args, extras = parser.parse_known_args()

    # Logging configuration
    log_args = [init_args.model_name, args.dataset, str(args.random_seed)]
    for arg in ['lr', 'l2'] + model_name.extra_log_args:
        log_args.append(arg + '=' + str(eval('args.' + arg)))
    log_file_name = '__'.join(log_args).replace(' ', '__')
    if args.log_file == '':
        args.log_file = '../log/{}/{}.txt'.format(init_args.model_name, log_file_name)
    if args.model_path == '':
        args.model_path = '../model/{}/{}.pt'.format(init_args.model_name, log_file_name)

    utils.check_dir(args.log_file)
    logging.basicConfig(filename=args.log_file, level=args.verbose)
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(init_args)

    main()
Example #26
    def save_model(self, model_path=None):
        if model_path is None:
            model_path = self.model_path
        utils.check_dir(model_path)
        logging.info('Save model to ' + model_path[:50] + '...')
Example #27
        plot_compared_distance_compressed(dataset_folders_dist, runs_img_dir, datasets_dist,
                                         'Robot distances from goal', 'distances-from-goal-compressed-distributed')
        
        plot_compared_distance_compressed(dataset_folders_comm, runs_img_dir, datasets_comm,
                                         'Robot distances from goal', 'distances-from-goal-compressed-communication')

        # Evaluate the learned controllers by passing a specific initial position configuration and compare them with
        # the omniscient and the manual controllers
        test_controller_given_init_positions(runs_img_dir, args.model, args.net_input)

    if args.generate_animations:
        from utils.my_plots import animate_simulation, plot_simulations, visualise_position_over_time
        from utils.utils import generate_fake_simulations, generate_init_positions, check_dir
        import numpy as np
        animations_dir = os.path.join(d, 'animations')
        check_dir(animations_dir)

        plots_dir = os.path.join(d, 'plots')
        check_dir(plots_dir)

        # Create a simulation for each of the controller using the same initial position
        if args.myt_quantity == 'variable':
            myt_quantities = np.arange(5, 11)
        else:
            myt_quantities = [int(args.myt_quantity)]

        for N in myt_quantities:
            dir = os.path.join(plots_dir, 'N%d' % N)
            check_dir(dir)
            _ = generate_fake_simulations(dir, args.model, N, simulations=100)
            out_dirs = [os.path.join(dir, 'omniscient'), os.path.join(dir, 'manual'), os.path.join(dir, 'distributed'), os.path.join(dir, 'communication')]