Beispiel #1
0
class BaseTest(unittest.TestCase):
    """Shared Selenium test base: owns the WebDriver and page objects.

    NOTE(review): these attributes are created once at class-definition
    (import) time, so a single driver session is shared by every test in
    every subclass — confirm that is intentional.
    """
    # Page objects are class attributes so all tests reuse one driver session.
    param = Parameters()
    welcomepage = WelcomePage(param.w, param.rootUrl)
    loginpage = LoginPage(param.w, param.rootUrl)
    mainpage = MainPage(param.w, param.rootUrl)
    framepage = FramePage(param.w, param.rootUrl)

    def setUp(self):
        """Open the root URL before each test and verify the welcome page."""
        self.param.w.get(self.param.rootUrl)
        self.param.w.maximize_window()
        # Use unittest's assertion instead of a bare `assert`: bare asserts
        # are stripped when Python runs with -O, silently skipping the check.
        self.assertTrue(self.welcomepage.check_page(),
                        "welcome page check failed after loading rootUrl")

    @classmethod
    def tearDownClass(cls):
        """Quit the shared WebDriver once after all tests in the class ran."""
        cls.param.w.quit()
Beispiel #2
0
def test():
    """Smoke-test caption preprocessing and text vectorization."""
    params = Parameters()
    # Extract image names and their paired caption texts from captions_train2014.
    names, captions = preprocess(params)
    print(len(names))
    print(len(captions))

    t_start = time()
    vectors = text_vectorize(captions, params)
    print(vectors)
    print(vectors.shape)
    t_end = time()
    print('text vectorize costs {} s'.format(t_end - t_start))
Beispiel #3
0
def image_name_lable_mapping(mapping_path=os.path.join(
        'gen_data/', 'mapping_dataset_5.json')):
    """Map each image name to its cluster label and dump the mapping to JSON.

    Loads the saved clustering result (one label per image, expected in the
    same order that ``preprocess`` yields image names), zips both sequences
    into an ``{image_name: label}`` dict and writes it to *mapping_path*.

    Args:
        mapping_path: Output JSON file. Defaults to the previously
            hard-coded ``gen_data/mapping_dataset_5.json`` location, so
            existing callers are unaffected.
    """
    # NOTE(review): "lable" is a typo for "label"; the name is kept so
    # existing callers don't break.
    params = Parameters()
    cluster_file_path = os.path.join(
        params.output_dir,
        params.saved_topic_file_name + '_' + str(params.topic_num) + '.json')
    with open(cluster_file_path, 'r') as f:
        cluster = json.load(f)
    print(len(cluster))

    image_name, _ = preprocess(params)
    print(len(image_name))

    # Relies on preprocess() and the saved cluster file using the same
    # ordering; zip silently truncates if the lengths differ.
    dataset = dict(zip(image_name, cluster))
    with open(mapping_path, 'w') as f:
        json.dump(dataset, f)
    print('finished!')
Beispiel #4
0
def main():
    """Vectorize COCO captions, k-means-cluster them, and save a data split."""
    params = Parameters()
    # Extract image names and their paired caption texts from captions_train2014.
    names, captions = preprocess(params)
    print(len(names))
    print(len(captions))

    vec_start = time()
    vectors = text_vectorize(captions, params)
    print(vectors)
    print(vectors.shape)
    vec_end = time()
    print('text vectorize costs {} s'.format(vec_end - vec_start))

    km_start = time()
    cluster = km_clustering(vectors, params)
    km_end = time()
    print('kmeans cluster costs {} s'.format(km_end - km_start))
    # print(cluster)
    print(len(cluster))

    # Remember each image's cluster label, then shuffle the names in place
    # and realign the labels to the shuffled order.
    name_to_label = dict(zip(names, cluster))
    random.shuffle(names)
    labels = [name_to_label[name] for name in names]

    # Fixed split boundaries: 50k train / 20k val / remainder test.
    split_bounds = {
        'train': (0, 50000),
        'val': (50000, 70000),
        'test': (70000, None),
    }
    dataset = {
        split: {'image_name': names[lo:hi], 'label': labels[lo:hi]}
        for split, (lo, hi) in split_bounds.items()
    }

    dataset_path = os.path.join(
        params.output_dir, 'dataset_topic_' + str(params.topic_num) + '.json')
    with open(dataset_path, 'w') as f:
        json.dump(dataset, f)
    print('finished!')
Beispiel #5
0
def main():
    """Build a train/val/test image-label dataset and write it to JSON.

    NOTE(review): the queue-wiring tail of this function references
    ``q_master``, which is never defined here — it looks like code spliced
    in from an unrelated function and will raise NameError if reached.
    Confirm against the original source.
    """
    # Set up parameters.
    params = Parameters()

    # dataset = build_dataset(params)
    # for img_name, captions in dataset:
    #     print(img_name, captions)
    img_ids, img_names, captions, similarity_scores = build_dataset(params)

    # Take the first and last 20k names (40k total) — presumably the
    # extremes of a similarity-score ordering; confirm with build_dataset.
    selected_img_names = img_names[:20000] + img_names[-20000:]
    labels = gen_labels(selected_img_names)

    # Fixed 30k / 5k / 5k train-val-test split.
    train_img_names = selected_img_names[:30000]
    train_labels = labels[:30000]

    val_img_names = selected_img_names[30000:35000]
    val_labels = labels[30000:35000]

    test_img_names = selected_img_names[35000:40000]
    test_labels = labels[35000:40000]

    train_dataset = {'image_name': train_img_names, 'label': train_labels}
    val_dataset = {'image_name': val_img_names, 'label': val_labels}
    test_dataset = {'image_name': test_img_names, 'label': test_labels}
    dataset = {
        'train': train_dataset,
        'val': val_dataset,
        'test': test_dataset
    }
    if not os.path.exists(params.output_dir):
        os.mkdir(params.output_dir)
    dataset_path = os.path.join(
        params.output_dir,
        params.mode_dataset_name + '_' + params.mode + '.json')

    with open(dataset_path, 'w') as f:
        json.dump(dataset, f)
    print('finished')
    # NOTE(review): from here on, q_master is undefined in this scope.
    q_gui = {
        "master2gui": q_master["master2gui"],
        "gui2master": q_master["gui2master"]
    }
    q_robots = {
        "robots2messenger": q_master["robots2messenger"],
        "messenger2robots": q_master["messenger2robots"]
    }
    q = {"master": q_master, "robots": q_robots, "gui": q_gui}
    return Container(q)


#---------------
if __name__ == '__main__':

    params = Parameters(path="../config.yaml")
    # Sweep lambda_ over [0, 2] in 21 steps; run 10 independent trials each.
    mean_time = []
    std_time = []
    for lambda_ in np.linspace(0, 2, 21):
        times = []
        n_failures = 0
        for i in range(10):
            # Fresh queues, environment, robots and master per trial so
            # trials do not share state.
            q = make_queues()
            env = Environment()
            gt = set(env.ground_truth)
            robot1 = Robot(1, q.robots, env)
            robot2 = Robot(2, q.robots, env)
            master = NaiveMaster(params, q.master, lambda_=lambda_)
            robot1.start()
            robot2.start()
            master.start()
            # NOTE(review): the trial body appears truncated here — `times`
            # and `n_failures` are never updated and the started threads are
            # never joined in the visible code; check the full script.
Beispiel #7
0
        reindex_y_full = pd.concat([reindex_y_full, chunklist_y_full[i + 2]],
                                   ignore_index=True)

    #reset empty space
    chunklist_TT_df_reduced = []
    chunklist_y_full = []

    return reindex_TT_df_reduced, reindex_y_full


# ---------------------------------------------------- Parameters -----------------------------------------------------------------------------------------------------

# Here we choose the 4X0 geometry, which corresponds to the SND@LHC pilot run.
# You need to change the X/Y half DIM in the .json file to 26.0 and 21.5.

params = Parameters("4X0")

# Paths to the raw data ROOT file and the pickle output directory.
filename = "/dcache/bfys/smitra/DS5/DS5.root"
loc_of_pkl_file = "/dcache/bfys/smitra/DS5/new_ship_tt_processed_data_forcompressed"
processed_file_path = os.path.expandvars(loc_of_pkl_file)
name_of_angle_file = "results/Angle_histo.root"
name_of_red_dim_hist = "results/XY_histo.root"

# Usually the data file is too large to be read in one go, which is why we
# read it in "chunks" of step_size events.
step_size = 3750  # number of events in a chunk
file_size = 150000  # number of events to be analysed. Maximum number for DS5.root is 200'000

n_steps = int(file_size / step_size)  # number of chunks
#nb_of_plane = len(params.snd_params[params.configuration]["TT_POSITIONS"])
nb_of_plane = 2  # NOTE(review): hard-coded; the commented line above would derive it from params
Beispiel #8
0
import os
import json
import sqlite3
import codecs

import utils.Parameters as Parameters
import utils.Graph as Graph
import utils.Node as Node
import utils.Edge as Edge
import NodeInfo
import EdgeInfo
import ArgumentDiagram

# paths
parameters = Parameters.Parameters()

# Functions


def getAllFilenamesFromDir(path):
    """Return the base names (``.txt`` suffix stripped) of all text files in *path*.

    Args:
        path: Directory to scan (non-recursive).

    Returns:
        list[str]: Names of ``.txt`` files without their extension, in
        ``os.listdir`` order.
    """
    filesName = []

    for textFile in os.listdir(path):
        if textFile.endswith(".txt"):
            # Bug fix: the original `textFile.split(".txt")[0]` truncated at
            # the FIRST ".txt" occurrence (e.g. "a.txtb.txt" -> "a").
            # splitext removes only the final extension.
            currentFileName = os.path.splitext(textFile)[0]
            filesName.append(currentFileName)
    return filesName

Beispiel #9
0
        print(f"train_step:{i} ret: {avg_ret:.1f}")
        with fw.as_default():
            tf.summary.scalar("return", avg_ret, i)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Hyper params")

    # CLI flags default to None so that only explicitly-passed values
    # override the JSON parameter file below.
    # NOTE(review): --replay_size is parsed as str and --start_size as
    # float although both look like sizes — confirm the intended types.
    parser.add_argument("--replay_size", type=str)
    parser.add_argument("--start_size", type=float)
    parser.add_argument("--gamma", type=float)
    parser.add_argument("--lr_a", type=float)
    parser.add_argument("--batch_size", type=int)
    parser.add_argument("--lr_c", type=float)
    parser.add_argument("--parms_path",
                        type=str,
                        default="parms/Pendulum-v0.json")

    args = parser.parse_args()
    terminal_parms = vars(args)

    # Load base hyper-parameters from the JSON file; use a context manager
    # so the handle is closed instead of leaked via json.load(open(...)).
    import json
    with open(terminal_parms['parms_path']) as f:
        json_parms = json.load(f)

    # Command-line values (when actually given) take precedence over JSON.
    json_parms.update({k: terminal_parms[k] for k in terminal_parms
                       if terminal_parms[k] is not None})
    parms_ = Parameters(json_parms)
    main(parms_)
Beispiel #10
0
else:
    print('NOT Using CUDA. This can be slow.')

# Preprocessing of dataset: the train call starts with empty (None) language
# objects, and the returned input_lang/output_lang are passed back in for the
# val and test splits — presumably to reuse one shared vocabulary; confirm
# against prepare_data.
input_lang, output_lang, pairs = prepare_data(None, None, dataset, 'train',
                                              args.maxlength)
input_lang, output_lang, val_pairs = prepare_data(input_lang, output_lang,
                                                  dataset, 'val',
                                                  args.maxlength)
input_lang, output_lang, test_pairs = prepare_data(input_lang, output_lang,
                                                   dataset, 'test',
                                                   args.maxlength)

# Encapsulation of fundamental Parameters shared by training and evaluation.
pars = Parameters(input_lang, output_lang, args.maxlength, EOS_token,
                  SOS_token, UNK_token, args.cuda, print_every, plot_every,
                  n_epochs, learning_rate)

# Instantiation of Encoder and Decoder; the attention decoder mirrors the
# encoder's hidden size and layer count.
encoder = EncoderRNN(input_lang.n_words, hidden_size, pars, n_layers)
decoder = AttnDecoderRNN(hidden_size,
                         output_lang.n_words,
                         pars,
                         n_layers,
                         dropout_p=dropout_p)

# Loading of previously trained models when resuming was requested.
if args.load:
    load_model(encoder, 'encoder')
    load_model(decoder, 'decoder')
Beispiel #11
0
def run(input_file, parameters_file, output_file, cascade_file, gui):
    """Run the eye-tracking video analysis, either headless or via the GUI.

    Args:
        input_file: Input video path (when *gui* is true it is replaced by
            the file chosen in the GUI dialog).
        parameters_file: ``.prm`` file holding the filter/tracking values.
        output_file: CSV file that receives one row of data per frame.
        cascade_file: Haar cascade file used to locate the eye region.
        gui: True to run interactively; False for command-line batch mode.
    """
    data_out = []  # Output variable (one entry per analysed frame)
    save = True  # Toggle save function
    rec_reset = True  # Toggle reset for analyzing in the cmd

    # Setup default settings if GUI in use
    if gui:
        # Save false, because we don't want to save/overwrite automatically when in GUI
        save = False
        window = init_gui()
        input_file = Path(window.ask_file("Input video file"))
        # Derive the output/parameter paths from the chosen video location.
        output_file = input_file.parent / (input_file.stem + "_analysis.csv")
        parameters_file = input_file.parent / (input_file.stem +
                                               "_analysis.prm")

    # Init Parameters object where all the parameters are saved
    params = Parameters(parameters_file)

    # Loads parameters from file, if exists
    if parameters_file.exists():
        print("Loading parameters from file.")
        params.load_parameters()
    else:
        # No parameter file, so using defaults
        print("No parameters file. Loading default parameters.")
        # Default parameters: brightness, contrast, threshold, blur,
        # bounding-box padding, output area size, continuous-tracking flag.
        params.params['b_val'] = 118
        params.params['c_val'] = 61
        params.params['thres_val'] = 85
        params.params['blur_val'] = 3
        params.params['pad_val'] = 12
        params.params['area_x'] = 160
        params.params['area_y'] = 160
        params.params['const_track'] = 0
        params.save_parameters()

    # Init capture
    cap = init_capture(input_file)

    # If GUI, set controls and add progress frame counter
    if gui:
        set_controls(window, params.params)
        counter = window.create_progress()

    # Setup filters: pre_filt feeds the tracker, out_filt produces the
    # analysed frame, disp_filt is only for on-screen display.
    pre_filt = VideoFiltering()
    out_filt = VideoFiltering()
    disp_filt = VideoFiltering()

    # Setup tracking and identify eye area
    tracker = Tracking(copy.deepcopy(cap.frame))
    tracker.haar_classifier(cascadeFile=str(cascade_file))
    haar_pt = tracker.get_tracking_point('haar')
    # while not haar_pt:
    #     print("Did not find eye. Trying again next frame.")
    #     cap.capture_frame()
    #     tracker = Tracking(copy.deepcopy(cap.frame))
    #     tracker.haar_classifier(cascadeFile=str(cascade_file))
    #     haar_pt = tracker.get_tracking_point('haar')

    # Display video stats
    print("\nFrame count of the video: {}".format(int(cap.get_total_frames())))
    print("FPS of the video: {}".format(int(cap.get_fps())))
    print("Runtime in seconds: {}".format(cap.get_lenght_in_s()))

    # Main loop
    # TODO: Rework to work with threading and in the tkinter mainloop
    while (cap.capture_open()):

        # Retry the eye detection until a tracking point is found.
        if not haar_pt:
            print("Did not find eye. Trying again.")
            tracker.haar_classifier(cascadeFile=str(cascade_file))
            haar_pt = tracker.get_tracking_point('haar')

        # Copy current frame to all filters and trackers
        pre_filt.frame = copy.deepcopy(cap.frame)
        out_filt.frame = copy.deepcopy(cap.frame)
        disp_filt.frame = copy.deepcopy(cap.frame)
        # Tracking is done for pre filtered frame
        tracker.frame = copy.deepcopy(pre_filt.frame)

        # Draw bounding box for detected area and select region of interest for the output frame
        if haar_pt is not None and haar_pt:
            disp_filt.draw_bounding_box(haar_pt, params.params['pad_val'])
            out_filt.crop_roi(haar_pt, params.params['pad_val'])

        # Filtering the output: blur -> brightness/contrast -> threshold -> resize
        out_filt.blur((params.params['blur_val'], params.params['blur_val']))
        out_filt.brightness_contrast(params.params['b_val'],
                                     params.params['c_val'])
        out_filt.threshold(params.params['thres_val'])
        out_filt.resize((params.params['area_x'], params.params['area_y']))

        # These will be run, if in GUI
        if gui:
            # Updating GUI if window is still open
            if window.get_status():
                update_counter(counter, out_filt, cap)
                update_parameters(window, params)
                window.add_video_frame_left(disp_filt.frame.frame,
                                            disp_filt.frame.get_frame_size())
                window.add_video_frame_right(out_filt.frame.frame,
                                             out_filt.frame.get_frame_size())
                window.update_gui()

            # Check status of preview and analyze buttons in the GUI
            if window.run or window.prev:
                # Resets video before running analysis or preview
                if window.reset or (window.prev
                                    and not out_filt.frame.frame_num <
                                    cap.get_total_frames() - 1):
                    cap.reset()
                    window.reset = False
                elif window.run:
                    # Flag save if analysis started
                    save = True
                    # Update progress in command line and append frame data to end result
                    update_counter_cmd(out_filt, cap)
                    append_frame_data(data_out, out_filt)
                cap.capture_frame()

            # Check parameters save button status and saves
            if window.save_params:
                print("Saving parameters to file.")
                window.save_params = False
                params.save_parameters()

            # Exit capture and program if window not open anymore
            if not window.get_status():
                cap.release_capture()

        # These run only when in command line
        else:
            # First iteration rewinds the capture so analysis starts at frame 0.
            if rec_reset:
                cap.reset()
                rec_reset = False
            else:
                update_counter_cmd(out_filt, cap)
                append_frame_data(data_out, out_filt)
            cap.capture_frame()

        # Detect eye location and update if continuous tracking enabled
        if params.params['const_track']:
            tracker.haar_classifier(cascadeFile=str(cascade_file),
                                    minSize=(24, 54))
            haar_pt = tracker.get_tracking_point('haar')

    # Save data to csv if save flag enabled
    if save:
        with open(output_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(data_out)
Beispiel #12
0
import root_numpy
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
from IPython import display
import os
from ROOT import TH1F, TFile
# Turn off interactive plotting: for a long run it would mess everything up.
plt.ioff()

# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------

# Here we choose the geometry with 9 times the radiation length.
# NOTE(review): the comment says 9X0 but the code passes "4X0" — confirm
# which geometry is actually intended.
params = Parameters(
    "4X0")  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE X/Y DIM !!!!!!!!!!!!!!!!

processed_file_path = os.path.expandvars(
    "$HOME/DS5/ship_tt_processed_data"
)  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE PATH !!!!!!!!!!!!!!!!

step_size = 5000  # size of a chunk (events per read)
file_size = 180000  # size of the BigFile.root file (events)
n_steps = int(file_size / step_size)  # number of chunks
nb_of_plane = len(params.snd_params[params.configuration]["TT_POSITIONS"])

# ----------------------------------------- PRODUCE THE tt_cleared.pkl & y_cleared.pkl IN ship_tt_processed_data/ FOLDER -------------------------------------------------
'''
# It is reading and analysing data by chunk instead of all at the time (memory leak problem)
print("\nProducing the tt_cleared.pkl & y_cleared.pkl files by chunk")
data_preprocessor = DataPreprocess(params)
Beispiel #13
0
    raise Exception("CUDA is not available")
# List every visible CUDA device with its compute capability.
n_devices = torch.cuda.device_count()
print("\nWelcome!\n\nCUDA devices available:\n")
for i in range(n_devices):
    print("\t{}\twith CUDA capability {}".format(
        torch.cuda.get_device_name(device=i),
        torch.cuda.get_device_capability(device=i)))
print("\n")
# Always run on the first CUDA device.
device = torch.device("cuda", 0)

# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Turn off interactive plotting: for a long run it would mess everything up.
plt.ioff()

# Here we choose the geometry with 9 times the radiation length.
# NOTE(review): the comment says 9X0 but the code passes "4X0" — confirm
# which geometry is actually intended.
params = Parameters(
    "4X0")  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE DIMENTION !!!!!!!!!!!!!!!!
processed_file_path = os.path.expandvars(
    "$HOME/DS5/ship_tt_processed_data"
)  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE PATH !!!!!!!!!!!!!!!!
step_size = 5000  # size of a chunk (events per read)
file_size = 180000  # size of the BigFile.root file (events)
n_steps = int(file_size / step_size)  # number of chunks

# ------------------------------------------ LOAD THE reindex_TT_df & reindex_y_full PD.DATAFRAME --------------------------------------------------------------------------

chunklist_TT_df = []  # list of the TT_df file of each chunk
chunklist_y_full = []  # list of the y_full file of each chunk

# It is reading and analysing data by chunk instead of all at once (memory leak problem)
print("\nReading the tt_cleared_reduced.pkl & y_cleared.pkl files by chunk")
#First 2
Beispiel #14
0
#------------------------------------------  In the notebook (miss 'from IPython import display' ??)
from net import SNDNet, MyDataset, digitize_signal

device = torch.device("cuda", 0)
from matplotlib import pylab as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
import os
#----------------------------------------- Not in the notebook
import gc
#----------------------------------------- In the notebook
params = Parameters("9X0")
data_preprocessor = DataPreprocess(params)
filename = "/data/ship_tt/9X0_500k.root"
#----------------------------------------- Not in the notebook: it is reading and analysing data by chunk
print("\nReading data now. Be patient...")
# Output directory for processed pickles; created if missing.
processed_file_path = os.path.expandvars("$HOME/ship_tt_processed_data")
os.system("mkdir -p {}".format(processed_file_path))
step_size = 5000  # events per chunk
file_size = 500000  # total events in the ROOT file
n_steps = int(file_size / step_size)  # number of chunks

chunklist_TT_df = []  # per-chunk TT dataframes
chunklist_y_full = []  # per-chunk target dataframes

# NOTE(review): this loop body looks truncated — it only garbage-collects
# and never reads or appends a chunk; check against the full script.
for i in tqdm(range(n_steps)):
    gc.collect()
Beispiel #15
0
def get_args():
    """Parse CLI arguments, merge them over the named config, set up paths.

    Returns:
        Parameters: merged configuration in which explicitly-passed CLI
        flags override values from ``configs.<config>_config``, with the
        derived ``data_dir``/``fname``/``checkpoints`` paths filled in.

    Raises:
        NotImplementedError: if ``--config`` names a configuration that
        does not exist in the ``configs`` module.
    """
    parser = argparse.ArgumentParser()
    # All flags without defaults parse to None so that only values the user
    # actually passed override the config file below.
    parser.add_argument("--config", default="cifar10_linf_AT", type=str)
    parser.add_argument("--model", default="PreActResNet18")
    parser.add_argument("--batch-size", type=int)
    parser.add_argument("--dataset", type=str)
    parser.add_argument("--data-dir", default="~/datasets/", type=str)
    parser.add_argument("--max-epoch", type=int)
    parser.add_argument("--epoch", default=1, type=int)
    parser.add_argument("--defense", type=str)
    parser.add_argument("--attack", type=str)
    parser.add_argument("--inner-loss", type=str)
    parser.add_argument("--outer-loss", type=str)
    parser.add_argument("--log-step", type=int)
    parser.add_argument("--lr", type=float)
    parser.add_argument("--lr-adjust", type=str)
    parser.add_argument("--weight-decay", type=float)
    parser.add_argument("--epsilon", type=int)
    parser.add_argument("--attack-iters", type=int)
    parser.add_argument("--pgd-alpha", type=float)
    parser.add_argument("--norm", type=str, choices=["l_inf", "l_2"])
    parser.add_argument("--fname", type=str)
    parser.add_argument("--seed", default=0, type=int)
    parser.add_argument("--resume-checkpoint", default="", type=str)
    parser.add_argument("--tensorboard", action="store_true")
    parser.add_argument("--project", default="AT-Framework", type=str)
    parser.add_argument("--no-amp", action="store_true")
    parser.add_argument("--gpu", default="0", type=str)
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    torch.backends.cudnn.benchmark = True

    # torch.backends.cudnn.enabled = True   # default value
    # torch.backends.cudnn.benchmark = False  # default is False
    # torch.backends.cudnn.deterministic = True

    import configs

    # Only a missing config attribute means "no such configuration".
    # The original caught bare Exception here, which masked unrelated
    # errors (e.g. a bad key inside the config dict) as NotImplementedError.
    try:
        config = getattr(configs, args.config + "_config")
    except AttributeError:
        raise NotImplementedError(
            f"No such configuration: {args.config}") from None

    # CLI flags that were actually passed (not None) override the config.
    args = vars(args)
    args = {**config, **{k: args[k] for k in args if args[k] is not None}}
    args = Parameters(args)

    args.data_dir = f"{args.data_dir}/{args.dataset}"

    # Tag the run name with the model, then timestamp both the run and the
    # checkpoint directories so repeated runs don't collide.
    args.fname = args.fname + "_" + args.model
    args.checkpoints = args.fname + "_checkpoints"

    current_time = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime())
    args.fname = args.fname + "/" + current_time
    args.checkpoints = args.checkpoints + "/" + current_time

    output_dir = "Outputs/"
    args.fname = output_dir + args.fname
    args.checkpoints = output_dir + args.checkpoints

    return args