Example no. 1
def do_economy_parameters(simInfo, pList, parameterDefs):
    """Parse the economy parameters and create the economies

    Each economy must have a name.
    Its parameters inherit from the simulator parameters
    @param pList: list of 5-tuples of parameters
    @param parameterDefs: the parameter definitions used to build each Parameters object
    @raise: ParameterError
    """
    economyList = [(eName, dType, dName, dVal) for eType, eName, dType, dName, dVal in pList if eType == 'economy']
    if not economyList:
        msg = "No economy parameters in %s" % simInfo.runDesc
        logger.error(msg)
        raise ParameterError(msg)

    # go through the list, sorted by economy name so all the params for one economy are together
    # and the economies are always created in the same order
    currentName = None
    currentParams = None
    for eName, dType, dName, dVal in sorted(economyList, key=itemgetter(0)):
        if eName != currentName:
            # starting a new one
            currentName = eName
            currentParams = Parameters(parameterDefs, simInfo.theParameters)
            econ = Economy(simInfo, eName, currentParams)
            logger.debug("Setting parameters for %s" % econ)
        if dType == 'parameter':
            currentParams.set(dName, dVal)
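
The Parameters(parameterDefs, parent) calls in Examples 1 and 2 rely on values falling back to a parent object (the simulator's parameters here, the economy's parameters below). The actual class is not shown in these snippets; a minimal sketch, assuming parameterDefs maps parameter names to conversion callables, might look like this (not the project's real implementation):

# Sketch only: an inheriting parameter container with a parent fallback.
class SketchParameters:
    def __init__(self, parameterDefs, parent=None):
        self._defs = parameterDefs   # assumed: name -> conversion/validation callable
        self._parent = parent        # values fall back to the parent when unset
        self._values = {}

    def set(self, name, value):
        if name not in self._defs:
            raise KeyError("Unknown parameter %r" % name)
        self._values[name] = self._defs[name](value)

    def get(self, name):
        if name in self._values:
            return self._values[name]
        if self._parent is not None:
            return self._parent.get(name)
        raise KeyError("Parameter %r has no value" % name)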
Example no. 2
def do_bank_parameters(simInfo, pList, parameterDefs):
    """Parse the bank parameters and create the banks

    Each bank must have a name and must belong to an economy.
    Its parameters inherit from those of its economy.
    @param pList: list of 5-tuples of parameters
    @param parameterDefs: the parameter definitions used to build each Parameters object
    @raise: ParameterError
    """
    bankList = [(eName, dType, dName, dVal) for eType, eName, dType, dName, dVal in pList if eType == 'bank']
    if not bankList:
        msg = "No bank parameters in %s" % simInfo.runDesc
        logger.error(msg)
        raise ParameterError(msg)

    # go through the list, sorted by bank name so all the params for one bank are together, with attributes
    # before params, and banks are always created in the same order
    currentName = None
    currentParams = None
    currentEconomyName = None
    for eName, dType, dName, dVal in sorted(bankList, key=itemgetter(0, 1)):
        if eName != currentName:
            # starting a new one
            currentName = eName
            currentParams = Parameters(parameterDefs, simInfo.theParameters)
            currentEconomyName = None
            logger.debug("Setting parameters for bank %s" % currentName)

        if dType == 'attribute' and dName == 'economy':
            currentEconomyName = dVal
            if currentEconomyName not in simInfo.economyDirectory:
                msg = "Invalid economy %r specified for bank %s in %s" % (
                    currentEconomyName, currentName, simInfo.runDesc)
                logger.error(msg)
                raise ParameterError(msg)
            econ = simInfo.economyDirectory[currentEconomyName]
            currentParams = Parameters(parameterDefs, econ.params)
            followMe = simInfo.followBank == currentName
            bank = Bank(simInfo, currentName, econ, currentParams, followMe)
            logger.debug("Created %s" % bank)

        elif dType == 'parameter':
            if not currentEconomyName:
                msg = "No economy specified for bank %s in %s" % (currentName, simInfo.runDesc)
                logger.error(msg)
                raise ParameterError(msg)
            currentParams.set(dName, dVal)
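
Both parsers consume the same 5-tuple layout (eType, eName, dType, dName, dVal), and do_bank_parameters relies on the 'attribute' row naming a bank's economy sorting before that bank's 'parameter' rows; the sorted(..., key=itemgetter(0, 1)) call guarantees this because 'attribute' < 'parameter'. A hypothetical pList fragment illustrating the expected layout and ordering:

pList = [
    ('economy', 'EU', 'parameter', 'base_rate', '0.01'),
    ('bank', 'BankA', 'attribute', 'economy', 'EU'),     # must be seen before the bank's parameters
    ('bank', 'BankA', 'parameter', 'capital', '1000'),
]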
Example no. 3
    def __init__(self, learning_rate):
        self.params = Parameters()
        self.learning_rate = learning_rate

        self.s0 = tt.matrix('s0')
        self.s0.tag.test_value = self.test_s0
        self.params.w_x = initial_weights(self.n_cells, self.input_size)
        self.params.w_f = initial_weights(self.n_cells, self.input_size)
        self.params.w_i = initial_weights(self.n_cells, self.input_size)

        self.params.w_clf = initial_weights(self.clf_size, self.n_cells)
        self.params.b_clf = initial_weights(self.clf_size)
Example no. 4
def test():
    params = Parameters()
    # Extract the information from captions_train2014 to get the image names and their corresponding text descriptions
    image_name, text = preprocess(params)
    print(len(image_name))
    print(len(text))

    time1 = time()
    text_vector = text_vectorize(text, params)
    print(text_vector)
    print(text_vector.shape)
    time2 = time()
    print('text vectorize costs {} s'.format(time2 - time1))
Example no. 5
class BaseTest(unittest.TestCase):
    param = Parameters()
    welcomepage = WelcomePage(param.w, param.rootUrl)
    loginpage = LoginPage(param.w, param.rootUrl)
    mainpage = MainPage(param.w, param.rootUrl)
    framepage = FramePage(param.w, param.rootUrl)
    def setUp(self):
        self.param.w.get(self.param.rootUrl)
        self.param.w.maximize_window()
        assert self.welcomepage.check_page()
    @classmethod
    def tearDownClass(cls):
        cls.param.w.quit()
Example no. 6
def image_name_lable_mapping():
    params = Parameters()
    cluster_file_path = os.path.join(
        params.output_dir,
        params.saved_topic_file_name + '_' + str(params.topic_num) + '.json')
    with open(cluster_file_path, 'r') as f:
        cluster = json.load(f)
    print(len(cluster))

    image_name, _ = preprocess(params)
    print(len(image_name))

    dataset = dict(zip(image_name, cluster))
    with open(os.path.join('gen_data/', 'mapping_dataset_5.json'), 'w') as f:
        json.dump(dataset, f)
    print('finished!')
Example no. 7
def main():
    params = Parameters()
    # Extract the information from captions_train2014 to get the image names and their corresponding text descriptions
    image_name, text = preprocess(params)
    print(len(image_name))
    print(len(text))

    time1 = time()
    text_vector = text_vectorize(text, params)
    print(text_vector)
    print(text_vector.shape)
    time2 = time()
    print('text vectorize costs {} s'.format(time2 - time1))

    time3 = time()
    cluster = km_clustering(text_vector, params)
    time4 = time()
    print('kmeans cluster costs {} s'.format(time4 - time3))
    # print(cluster)
    print(len(cluster))

    image_name_label = dict(zip(image_name, cluster))
    random.shuffle(image_name)
    label = [image_name_label[img_name] for img_name in image_name]

    train_image_name, train_label = image_name[0:50000], label[0:50000]
    val_image_name, val_label = image_name[50000:70000], label[50000:70000]
    test_image_name, test_label = image_name[70000:], label[70000:]

    train_data = {'image_name': train_image_name, 'label': train_label}

    val_data = {'image_name': val_image_name, 'label': val_label}
    test_data = {'image_name': test_image_name, 'label': test_label}

    dataset = {'train': train_data, 'val': val_data, 'test': test_data}
    dataset_path = os.path.join(
        params.output_dir, 'dataset_topic_' + str(params.topic_num) + '.json')
    with open(dataset_path, 'w') as f:
        json.dump(dataset, f)
    print('finished!')
Example no. 8
def main():
    # Set up the parameters
    params = Parameters()

    # dataset = build_dataset(params)
    # for img_name, captions in dataset:
    #     print(img_name, captions)
    img_ids, img_names, captions, similarity_scores = build_dataset(params)

    selected_img_names = img_names[:20000] + img_names[-20000:]
    labels = gen_labels(selected_img_names)

    train_img_names = selected_img_names[:30000]
    train_labels = labels[:30000]

    val_img_names = selected_img_names[30000:35000]
    val_labels = labels[30000:35000]

    test_img_names = selected_img_names[35000:40000]
    test_labels = labels[35000:40000]

    train_dataset = {'image_name': train_img_names, 'label': train_labels}
    val_dataset = {'image_name': val_img_names, 'label': val_labels}
    test_dataset = {'image_name': test_img_names, 'label': test_labels}
    dataset = {
        'train': train_dataset,
        'val': val_dataset,
        'test': test_dataset
    }
    if not os.path.exists(params.output_dir):
        os.mkdir(params.output_dir)
    dataset_path = os.path.join(
        params.output_dir,
        params.mode_dataset_name + '_' + params.mode + '.json')

    with open(dataset_path, 'w') as f:
        json.dump(dataset, f)
    print('finished')
Example no. 9
class LSTM(object):
    input_size = 2
    n_cells = 3
    clf_size = 1
    rprop_plus = 1.4
    rprop_minus = 0.5

    test_x = [
        [
            [0, 1],
            [1, 0],
            [1, 1],
            [1, 1],
        ],
        [
            [0, 0],
            [0, 0],
            [0, 0],
            [0, 0],
        ]
    ]

    test_y = [
        0,
        1,
        0,
        0
    ]

    test_s0 = [[0, 0, 0]] * len(test_x[0])

    def __init__(self, learning_rate):
        self.params = Parameters()
        self.learning_rate = learning_rate

        self.s0 = tt.matrix('s0')
        self.s0.tag.test_value = self.test_s0
        self.params.w_x = initial_weights(self.n_cells, self.input_size)
        self.params.w_f = initial_weights(self.n_cells, self.input_size)
        self.params.w_i = initial_weights(self.n_cells, self.input_size)

        self.params.w_clf = initial_weights(self.clf_size, self.n_cells)
        self.params.b_clf = initial_weights(self.clf_size)

    def build_train(self):
        # Build training function.
        # data_x[t,i,:] = input number i at time t
        data_x = tt.tensor3('data_x')
        data_x.tag.test_value = self.test_x

        # Desired outputs.
        data_y = tt.vector('data_y')
        data_y.tag.test_value = self.test_y

        def process_input(x):
            """
            Step through the input sequence encoded by x over time and return the final state.

            Args:
                x: A 3 dimensional tensor [time, example_id, example_dims].

            Return: Matrix with final states for all examples.
            """
            def step(x, s):
                """

                Args:
                    x: A 2 dimensional matrix with input [example_id,
                        example_dims].
                    s: A 2 dimensional matrix with current state [example_id,
                        state_dim].
                """
                c = tt.tanh(tt.tensordot(x, self.params.w_x,
                                         [[1], [1]]))
                g_f = tt.nnet.sigmoid(tt.tensordot(x, self.params.w_f,
                                                   [[1], [1]]))
                g_i = tt.nnet.sigmoid(tt.tensordot(x, self.params.w_i,
                                                   [[1], [1]]))

                new_cell = c * g_i
                new_state = s * g_f

                return new_cell + new_state

            # For each time step compute the new state. The result is a
            # 3-dimensional tensor.
            res, updates = theano.scan(
                fn=step,
                outputs_info=[self.s0],
                n_steps=x.shape[0],
                sequences=[x]
            )
            res = res[-1]

            return res

        #final_state = process_input(data_x)
        #return function([data_x, self.s0], final_state)

        def clf_state(curr_s):
            """
            For each example, get the classifier output.

            Args:
                curr_s: Matrix of final states for each example [example_id,
                    example_dims].
            """
            return tt.nnet.sigmoid(
                tt.tensordot(self.params.w_clf, curr_s, [[1], [1]]) +
                        self.params.b_clf.dimshuffle(0, 'x'))

        def classify(data_x):
            final_state = process_input(data_x)
            clf = clf_state(final_state)
            return clf

        def build_loss(data_x, y):
            clf = classify(data_x)
            return tt.mean((clf - y)**2)

        def validate(data_x, y):
            return (classify(data_x), build_loss(data_x, y))

        total_loss = build_loss(data_x, data_y)

        self.shapes = []
        grads = []
        grads_history = []
        self.grads_rprop = grads_rprop = []
        grads_rprop_new = []
        for param in self.params.values():
            logging.info('param %s', param.name)

            shape = param.shape.eval()
            self.shapes.append(shape)
            grad = tt.grad(total_loss, wrt=param)
            grads.append(grad)

            # Save gradients histories for RProp.
            grad_hist = theano.shared(ones(shape), name="%s_hist" % param)
            grads_history.append(
                grad_hist
            )

            # Create variables where rprop rates will be stored.
            grad_rprop = theano.shared(ones(shape) * self.learning_rate,
                                       name="%s_rprop" % param)
            grads_rprop.append(grad_rprop)

            # Compute the new RProp coefficients.
            rprop_sign = tt.sgn(grad_hist * grad)
            grad_rprop_new = grad_rprop * (
                tt.eq(rprop_sign, 1) * self.rprop_plus
                + tt.neq(rprop_sign, 1) * self.rprop_minus
            )
            grads_rprop_new.append(grad_rprop_new)


        self.train_step = function(
            inputs=[self.s0, data_x, data_y],
            outputs=[total_loss],
            updates=[
                        # Update parameters according to the RProp update rule.
                        (p, p - rg * tt.sgn(g)) for p, g, rg in zip(
                    self.params.values(),
                    grads,
                    grads_rprop_new)
                    ] + [
                        # Save the current gradient for the next step.
                        (hg, g) for hg, g in zip(grads_history, grads)
                    ] + [
                        # Save the new rprop grads.
                        (rg, rg_new) for rg, rg_new in zip(grads_rprop, grads_rprop_new)
                    ]
        )

        #x = tt.matrix('x')
        self.classify = function([self.s0, data_x], classify(data_x))
        self.validation_loss = function([self.s0, data_x, data_y], validate(
            data_x, data_y))
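
The updates wired into train_step implement an RProp-style rule: each weight keeps its own step size, which is multiplied by rprop_plus when the gradient keeps its sign between iterations and by rprop_minus otherwise, and the weight then moves by that step size against the sign of the gradient. A plain-NumPy sketch of the same per-weight rule (illustrative only; the function name is hypothetical):

import numpy as np

def rprop_step(param, grad, prev_grad, rate, plus=1.4, minus=0.5):
    same_sign = np.sign(prev_grad * grad) == 1          # did the gradient keep its sign?
    new_rate = rate * np.where(same_sign, plus, minus)  # grow or shrink each per-weight step size
    new_param = param - new_rate * np.sign(grad)        # step by sign only, not by magnitude
    return new_param, new_rate, grad                    # grad becomes prev_grad for the next call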
Example no. 10
        reindex_y_full = pd.concat([reindex_y_full, chunklist_y_full[i + 2]],
                                   ignore_index=True)

    # release the chunk lists to free memory
    chunklist_TT_df_reduced = []
    chunklist_y_full = []

    return reindex_TT_df_reduced, reindex_y_full


# ---------------------------------------------------- Parameters -----------------------------------------------------------------------------------------------------

# Here we choose the 4X0 geometry, which corresponds to the SND@LHC pilot run
# You need to change the X/Y half DIM in the .json file to 26.0 and 21.5

params = Parameters("4X0")

# Path to the raw Data root file and the pickle file
filename = "/dcache/bfys/smitra/DS5/DS5.root"
loc_of_pkl_file = "/dcache/bfys/smitra/DS5/new_ship_tt_processed_data_forcompressed"
processed_file_path = os.path.expandvars(loc_of_pkl_file)
name_of_angle_file = "results/Angle_histo.root"
name_of_red_dim_hist = "results/XY_histo.root"

# Usually the data file is too large to be read in one go, which is why we read it in chunks of step_size events
step_size = 3750  # number of events in a chunk
file_size = 150000  # number of events to be analysed. The maximum for DS5.root is 200'000

n_steps = int(file_size / step_size)  # number of chunks
#nb_of_plane = len(params.snd_params[params.configuration]["TT_POSITIONS"])
nb_of_plane = 2
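
The step_size, file_size and n_steps values above drive a chunk-and-concatenate loop of the kind whose tail is visible at the top of this snippet. A generic sketch of that pattern, reusing the step_size and n_steps defined above and with read_one_chunk as a hypothetical placeholder for the actual ROOT reader:

import pandas as pd

def read_one_chunk(start, stop):
    # placeholder: the real code reads events [start, stop) from the ROOT file
    return pd.DataFrame({"event": range(start, stop)})

chunks = [read_one_chunk(i * step_size, (i + 1) * step_size) for i in range(n_steps)]
reindexed_df = pd.concat(chunks, ignore_index=True)  # continuous index across all chunks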
Example no. 11
import os
import json
import sqlite3
import codecs

import utils.Parameters as Parameters
import utils.Graph as Graph
import utils.Node as Node
import utils.Edge as Edge
import NodeInfo
import EdgeInfo
import ArgumentDiagram

# paths
parameters = Parameters.Parameters()

# Functions


def getAllFilenamesFromDir(path):

    filesName = []

    for textFile in os.listdir(path):
        if textFile.endswith(".txt"):
            currentFileName = textFile.split(".txt")[0]
            filesName.append(currentFileName)
    return filesName
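
A hypothetical usage of the helper above; the directory attribute looked up on parameters is an assumption, since the snippet does not show what the Parameters object exposes:

if __name__ == "__main__":
    text_dir = getattr(parameters, "corpusDir", ".")  # hypothetical attribute; falls back to the cwd
    print(getAllFilenamesFromDir(text_dir))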

Example no. 12
        print(f"train_step:{i} ret: {avg_ret:.1f}")
        with fw.as_default():
            tf.summary.scalar("return", avg_ret, i)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Hyper params")

    parser.add_argument("--replay_size", type=str)
    parser.add_argument("--start_size", type=float)
    parser.add_argument("--gamma", type=float)
    parser.add_argument("--lr_a", type=float)
    parser.add_argument("--batch_size", type=int)
    parser.add_argument("--lr_c", type=float)
    parser.add_argument("--parms_path",
                        type=str,
                        default="parms/Pendulum-v0.json")

    args = parser.parse_args()
    terminal_parms = vars(args)

    # json parms
    import json
    json_parms = json.load(open(terminal_parms['parms_path']))

    json_parms.update({k: terminal_parms[k] for k in terminal_parms \
                        if terminal_parms[k] is not None})
    parms_ = Parameters(json_parms)
    main(parms_)
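
The update near the end implements "command-line values override the JSON file, but only for flags that were actually supplied", because argparse leaves unspecified options at None. A self-contained illustration of that merge rule with hypothetical values:

json_parms_demo = {"gamma": 0.99, "lr_a": 1e-4, "batch_size": 64}  # from the JSON file
terminal_demo = {"gamma": 0.95, "lr_a": None, "batch_size": None}  # from argparse
json_parms_demo.update({k: v for k, v in terminal_demo.items() if v is not None})
assert json_parms_demo == {"gamma": 0.95, "lr_a": 1e-4, "batch_size": 64}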
Example no. 13
else:
    print('NOT Using CUDA. This can be slow.')

# Preprocessing of dataset
input_lang, output_lang, pairs = prepare_data(None, None, dataset, 'train',
                                              args.maxlength)
input_lang, output_lang, val_pairs = prepare_data(input_lang, output_lang,
                                                  dataset, 'val',
                                                  args.maxlength)
input_lang, output_lang, test_pairs = prepare_data(input_lang, output_lang,
                                                   dataset, 'test',
                                                   args.maxlength)

# Encapsulation of fundamental Parameters
pars = Parameters(input_lang, output_lang, args.maxlength, EOS_token,
                  SOS_token, UNK_token, args.cuda, print_every, plot_every,
                  n_epochs, learning_rate)

# Instantiation of Encoder and Decoder
encoder = EncoderRNN(input_lang.n_words, hidden_size, pars, n_layers)
decoder = AttnDecoderRNN(hidden_size,
                         output_lang.n_words,
                         pars,
                         n_layers,
                         dropout_p=dropout_p)

# Loading of previously trained models
if args.load:
    load_model(encoder, 'encoder')
    load_model(decoder, 'decoder')
Example no. 14
def run(input_file, parameters_file, output_file, cascade_file, gui):
    data_out = []  # Output variable
    save = True  # Toggle save function
    rec_reset = True  # Toggle reset for analyzing in the cmd

    # Setup default settings if GUI in use
    if gui:
        # Save false, because we don't want to save/overwrite automatically when in the GUI
        save = False
        window = init_gui()
        input_file = Path(window.ask_file("Input video file"))
        output_file = input_file.parent / (input_file.stem + "_analysis.csv")
        parameters_file = input_file.parent / (input_file.stem +
                                               "_analysis.prm")

    # Init Parameters object where all the parameters are saved
    params = Parameters(parameters_file)

    # Loads parameters from file, if exists
    if parameters_file.exists():
        print("Loading parameters from file.")
        params.load_parameters()
    else:
        # No parameter file, so using defaults
        print("No parameters file. Loading default parameters.")
        # Default parameters
        params.params['b_val'] = 118
        params.params['c_val'] = 61
        params.params['thres_val'] = 85
        params.params['blur_val'] = 3
        params.params['pad_val'] = 12
        params.params['area_x'] = 160
        params.params['area_y'] = 160
        params.params['const_track'] = 0
        params.save_parameters()

    # Init capture
    cap = init_capture(input_file)

    # If GUI, set controls and add progress frame counter
    if gui:
        set_controls(window, params.params)
        counter = window.create_progress()

    # Setup filters
    pre_filt = VideoFiltering()
    out_filt = VideoFiltering()
    disp_filt = VideoFiltering()

    # Setup tracking and identify eye area
    tracker = Tracking(copy.deepcopy(cap.frame))
    tracker.haar_classifier(cascadeFile=str(cascade_file))
    haar_pt = tracker.get_tracking_point('haar')
    # while not haar_pt:
    #     print("Did not find eye. Trying again next frame.")
    #     cap.capture_frame()
    #     tracker = Tracking(copy.deepcopy(cap.frame))
    #     tracker.haar_classifier(cascadeFile=str(cascade_file))
    #     haar_pt = tracker.get_tracking_point('haar')

    # Display video stats
    print("\nFrame count of the video: {}".format(int(cap.get_total_frames())))
    print("FPS of the video: {}".format(int(cap.get_fps())))
    print("Runtime in seconds: {}".format(cap.get_lenght_in_s()))

    # Main loop
    # TODO: rework this loop to use threading and run inside the tkinter mainloop
    while (cap.capture_open()):

        if not haar_pt:
            print("Did not find eye. Trying again.")
            tracker.haar_classifier(cascadeFile=str(cascade_file))
            haar_pt = tracker.get_tracking_point('haar')

        # Copy current frame to all filters and trackers
        pre_filt.frame = copy.deepcopy(cap.frame)
        out_filt.frame = copy.deepcopy(cap.frame)
        disp_filt.frame = copy.deepcopy(cap.frame)
        # Tracking is done for pre filtered frame
        tracker.frame = copy.deepcopy(pre_filt.frame)

        # Draw a bounding box for the detected area and select the region of interest for the output frame
        if haar_pt is not None and haar_pt:
            disp_filt.draw_bounding_box(haar_pt, params.params['pad_val'])
            out_filt.crop_roi(haar_pt, params.params['pad_val'])

        # Filtering the output
        out_filt.blur((params.params['blur_val'], params.params['blur_val']))
        out_filt.brightness_contrast(params.params['b_val'],
                                     params.params['c_val'])
        out_filt.threshold(params.params['thres_val'])
        out_filt.resize((params.params['area_x'], params.params['area_y']))

        # These will be run, if in GUI
        if gui:
            # Updating GUI if window is still open
            if window.get_status():
                update_counter(counter, out_filt, cap)
                update_parameters(window, params)
                window.add_video_frame_left(disp_filt.frame.frame,
                                            disp_filt.frame.get_frame_size())
                window.add_video_frame_right(out_filt.frame.frame,
                                             out_filt.frame.get_frame_size())
                window.update_gui()

            # Check status of preview and analyze buttons in the GUI
            if window.run or window.prev:
                # Resets the video before running analysis or preview
                if window.reset or (window.prev
                                    and not out_filt.frame.frame_num <
                                    cap.get_total_frames() - 1):
                    cap.reset()
                    window.reset = False
                elif window.run:
                    # Flag save if analysis started
                    save = True
                    # Update progress in command line and append frame data to end result
                    update_counter_cmd(out_filt, cap)
                    append_frame_data(data_out, out_filt)
                cap.capture_frame()

            # Check parameters save button status and saves
            if window.save_params:
                print("Saving parameters to file.")
                window.save_params = False
                params.save_parameters()

            # Exit capture and program if window not open anymore
            if not window.get_status():
                cap.release_capture()

        # These only run when in command-line mode
        else:
            if rec_reset:
                cap.reset()
                rec_reset = False
            else:
                update_counter_cmd(out_filt, cap)
                append_frame_data(data_out, out_filt)
            cap.capture_frame()

        # Detect eye location and update if tracking enabled
        if params.params['const_track']:
            tracker.haar_classifier(cascadeFile=str(cascade_file),
                                    minSize=(24, 54))
            haar_pt = tracker.get_tracking_point('haar')

    # Save data to csv if save flag enabled
    if save:
        with open(output_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(data_out)
Example no. 15
#------------------------------------------  In the notebook (missing 'from IPython import display'?)
from net import SNDNet, MyDataset, digitize_signal

device = torch.device("cuda", 0)
from matplotlib import pylab as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
import os
#----------------------------------------- Not in the notebook
import gc
#----------------------------------------- In the notebook
params = Parameters("9X0")
data_preprocessor = DataPreprocess(params)
filename = "/data/ship_tt/9X0_500k.root"
#----------------------------------------- Not in the notebook: it is reading and analysing data by chunk
print("\nReading data now. Be patient...")
processed_file_path = os.path.expandvars("$HOME/ship_tt_processed_data")
os.system("mkdir -p {}".format(processed_file_path))
step_size = 5000
file_size = 500000
n_steps = int(file_size / step_size)

chunklist_TT_df = []
chunklist_y_full = []

for i in tqdm(range(n_steps)):
    gc.collect()
Example no. 16
    raise Exception("CUDA is not available")
n_devices = torch.cuda.device_count()
print("\nWelcome!\n\nCUDA devices available:\n")
for i in range(n_devices):
    print("\t{}\twith CUDA capability {}".format(
        torch.cuda.get_device_name(device=i),
        torch.cuda.get_device_capability(device=i)))
print("\n")
device = torch.device("cuda", 0)

# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Turn off interactive plotting: for long runs it screws everything up
plt.ioff()

# Here we choose the geometry with 9 times the radiation length
params = Parameters(
    "4X0")  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE DIMENSION !!!!!!!!!!!!!!!!
processed_file_path = os.path.expandvars(
    "$HOME/DS5/ship_tt_processed_data"
)  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE PATH !!!!!!!!!!!!!!!!
step_size = 5000  # size of a chunk
file_size = 180000  # size of the BigFile.root file
n_steps = int(file_size / step_size)  # number of chunks

# ------------------------------------------ LOAD THE reindex_TT_df & reindex_y_full PD.DATAFRAME --------------------------------------------------------------------------

chunklist_TT_df = []  # list of the TT_df file of each chunk
chunklist_y_full = []  # list of the y_full file of each chunk

# It reads and analyses the data by chunk instead of all at once (memory-leak problem)
print("\nReading the tt_cleared_reduced.pkl & y_cleared.pkl files by chunk")
#First 2
Example no. 17
import root_numpy
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
from IPython import display
import os
from ROOT import TH1F, TFile
# Turn off interactive plotting: for long runs it screws everything up
plt.ioff()

# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------

# Here we choose the geometry with 9 times the radiation length
params = Parameters(
    "4X0")  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE X/Y DIM !!!!!!!!!!!!!!!!

processed_file_path = os.path.expandvars(
    "$HOME/DS5/ship_tt_processed_data"
)  #!!!!!!!!!!!!!!!!!!!!!CHANGE THE PATH !!!!!!!!!!!!!!!!

step_size = 5000  # size of a chunk
file_size = 180000  # size of the BigFile.root file
n_steps = int(file_size / step_size)  # number of chunks
nb_of_plane = len(params.snd_params[params.configuration]["TT_POSITIONS"])

# ----------------------------------------- PRODUCE THE tt_cleared.pkl & y_cleared.pkl IN ship_tt_processed_data/ FOLDER -------------------------------------------------
'''
# It reads and analyses the data by chunk instead of all at once (memory-leak problem)
print("\nProducing the tt_cleared.pkl & y_cleared.pkl files by chunk")
data_preprocessor = DataPreprocess(params)
Example no. 18
    q_gui = {
        "master2gui": q_master["master2gui"],
        "gui2master": q_master["gui2master"]
    }
    q_robots = {
        "robots2messenger": q_master["robots2messenger"],
        "messenger2robots": q_master["messenger2robots"]
    }
    q = {"master": q_master, "robots": q_robots, "gui": q_gui}
    return Container(q)


#---------------
if __name__ == '__main__':

    params = Parameters(path="../config.yaml")
    mean_time = []
    std_time = []
    for lambda_ in np.linspace(0, 2, 21):
        times = []
        n_failures = 0
        for i in range(10):
            q = make_queues()
            env = Environment()
            gt = set(env.ground_truth)
            robot1 = Robot(1, q.robots, env)
            robot2 = Robot(2, q.robots, env)
            master = NaiveMaster(params, q.master, lambda_=lambda_)
            robot1.start()
            robot2.start()
            master.start()
Example no. 19
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="cifar10_linf_AT", type=str)
    parser.add_argument("--model", default="PreActResNet18")
    parser.add_argument("--batch-size", type=int)
    parser.add_argument("--dataset", type=str)
    parser.add_argument("--data-dir", default="~/datasets/", type=str)
    parser.add_argument("--max-epoch", type=int)
    parser.add_argument("--epoch", default=1, type=int)
    parser.add_argument("--defense", type=str)
    parser.add_argument("--attack", type=str)
    parser.add_argument("--inner-loss", type=str)
    parser.add_argument("--outer-loss", type=str)
    parser.add_argument("--log-step", type=int)
    parser.add_argument("--lr", type=float)
    parser.add_argument("--lr-adjust", type=str)
    parser.add_argument("--weight-decay", type=float)
    parser.add_argument("--epsilon", type=int)
    parser.add_argument("--attack-iters", type=int)
    parser.add_argument("--pgd-alpha", type=float)
    parser.add_argument("--norm", type=str, choices=["l_inf", "l_2"])
    parser.add_argument("--fname", type=str)
    parser.add_argument("--seed", default=0, type=int)
    parser.add_argument("--resume-checkpoint", default="", type=str)
    parser.add_argument("--tensorboard", action="store_true")
    parser.add_argument("--project", default="AT-Framework", type=str)
    parser.add_argument("--no-amp", action="store_true")
    parser.add_argument("--gpu", default="0", type=str)
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    torch.backends.cudnn.benchmark = True

    # torch.backends.cudnn.enabled = True   # default value
    # torch.backends.cudnn.benchmark = False  # defaults to False
    # torch.backends.cudnn.deterministic = True

    import configs

    try:
        config = getattr(configs, args.config + "_config")
        args = vars(args)
        args = {**config, **{k: args[k] for k in args if args[k] is not None}}
        args = Parameters(args)
    except Exception:
        raise NotImplementedError(f"No such configuration: {args.config}")

    args.data_dir = f"{args.data_dir}/{args.dataset}"

    args.fname = args.fname + "_" + args.model
    args.checkpoints = args.fname + "_checkpoints"

    current_time = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime())
    args.fname = args.fname + "/" + current_time
    args.checkpoints = args.checkpoints + "/" + current_time

    output_dir = "Outputs/"
    args.fname = output_dir + args.fname
    args.checkpoints = output_dir + args.checkpoints

    return args
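
Here Parameters(args) wraps the merged configuration dict so the rest of the function can use attribute access (args.data_dir, args.fname, ...). The class itself is not shown; a minimal dict-backed sketch that would support that access pattern, offered only as an assumption about its behaviour:

class SketchParameters:
    def __init__(self, values):
        # expose each configuration key as an attribute, e.g. params.data_dir
        self.__dict__.update(values)

params_demo = SketchParameters({"data_dir": "~/datasets/", "model": "PreActResNet18"})
print(params_demo.data_dir, params_demo.model)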