Example #1
    def __init__(self, w=11, h=11):
        # Two parallel w x h grids: tile values and their sprite handles.
        self.content = utils.create_array(w, h, 0)
        self.sprite_content = utils.create_array(w, h, None)
        self.width = w
        self.height = h

        # Seed the map with a single living entity.
        self.list_entities = []
        self.list_entities.append(entities.living_entity(10, 10))

        # Give every tile a floor sprite, spaced 8 px apart.
        for x in range(w):
            for y in range(h):
                self.sprite_content[x][y] = res.create_sprite(
                    res.tile_floor, x * 8, y * 8, group=res.group_floor)
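
This example uses `utils.create_array(w, h, value)` as a 2-D grid constructor. A minimal sketch of such a helper, assuming it simply returns a w x h list of lists prefilled with `value` (illustrative only, not the project's actual code):

def create_array(w, h, value):
    # Build a w x h grid as a list of columns so it supports the
    # grid[x][y] indexing used in the loop above.
    return [[value for _ in range(h)] for _ in range(w)]

grid = create_array(3, 2, 0)
grid[2][1] = 5  # x index first, then y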
Example #2
    def write_block_get_ber(self, block_idx):
        """Writes random data to all pages in specified block and then compares data written to data intended to be written."""
        random_data = utils.create_array("random",
                                         self.PageSize,
                                         filename="random_input.bin")
        input_data = "".join(map(chr, random_data))

        self.write_block(block_idx, input_data, per_page=True)

        self.read_block(block_idx,
                        remove_oob=True,
                        comparison_file="random_input.bin",
                        compare_per_page=True)

        pages = []
        ber = []

        with open(self.IDString.rstrip() + "_BER.txt", "r") as error_data:
            reader = csv.reader(error_data, delimiter=",")
            for row in reader:
                pages.append(int(row[1]))    # page index
                ber.append(float(row[2]))    # error rate for that page

        pyplot.plot(pages, ber)
        pyplot.xlabel("Page")
        pyplot.ylabel("Bit Error Rate")
        pyplot.title("BER per Page")
        pyplot.show()
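
Here `utils.create_array("random", size, filename=...)` acts as a random-data generator that also persists the bytes for the later comparison read. A minimal sketch under that assumption (the `kind` handling and file format are guesses):

import os

def create_array(kind, size, filename=None):
    # Return `size` random byte values, optionally persisting them so a
    # later read-back can be compared against the same data.
    assert kind == "random", "only the 'random' mode is sketched here"
    data = list(os.urandom(size))
    if filename is not None:
        with open(filename, "wb") as f_out:
            f_out.write(bytes(data))
    return data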
Example #3
def get_centrality(graph, method, topk=None):
    """Compute the requested centrality measure and return the scores,
    the indices of nodes sorted by descending score, and the mean."""
    if method == "edge_betweenness_centrality":
        output = nx.edge_betweenness_centrality(graph)
    elif method == "betweenness_centrality":
        output = nx.betweenness_centrality(graph)
    elif method == "closeness_centrality":
        output = nx.closeness_centrality(graph)
    elif method == "eigenvector_centrality":
        output = nx.eigenvector_centrality(graph)
    elif method == "in_degree_centrality":
        output = nx.in_degree_centrality(graph)
    elif method == "out_degree_centrality":
        output = nx.out_degree_centrality(graph)
    elif method == "pagerank":
        output = pagerank(graph)
    else:
        raise ValueError("unknown centrality method: %s" % method)
    print(len(output))
    # Flatten the {node: score} dict into an array of scores.
    output = np.array(create_array(output))
    mean = round(np.mean(output), 4)
    # Indices sorted by descending score, optionally truncated to top-k.
    if topk:
        arg_sorted_results = np.argsort(output)[::-1][:topk]
    else:
        arg_sorted_results = np.argsort(output)[::-1]

    return output, arg_sorted_results, mean
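
In this snippet `create_array` flattens the `{node: score}` dict returned by networkx into a plain list. A minimal sketch, assuming it just takes the values in node-insertion order (the real helper may order differently):

def create_array(score_dict):
    # networkx centrality calls return {node: score}; taking the values
    # in insertion order keeps scores aligned with the graph's node
    # order, so np.argsort yields positions in that same order.
    return list(score_dict.values())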
Example #4
def testing(dataloader):
    # Load the trained model, falling back to CPU if CUDA is unavailable.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    path = os.path.join(args.store_dir, 'model.pth.tar')
    model = EncoderDecoderConvLSTM(nf=args.n_hidden_dim, in_chan=1)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(path, map_location=device))
    model.to(device)
    model.eval()
    # Test: predict the last 10 frames from the first 10.
    criterion = nn.MSELoss()
    print(f'Started testing on {device}')
    with torch.no_grad():
        for batch in dataloader:
            batch = batch.to(device)
            x, y = batch[:, 0:10, :, :, :], batch[:, 10:, :, :, :].squeeze()
            y_hat = model(x, future_seq=10).squeeze()
            testing_loss = criterion(y_hat, y)
            video_frames = create_array(y_hat, y)
            generate_video(video_array=video_frames,
                           video_filename=args.store_dir + '/result.avi')
            break  # only evaluate one batch
    return testing_loss.cpu()
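
`create_array(y_hat, y)` presumably converts the predicted and ground-truth tensors into displayable frames for `generate_video`. A minimal sketch, assuming grayscale [0, 1] outputs of shape (batch, time, H, W) and a side-by-side layout (all assumptions, not the project's code):

import numpy as np

def create_array(y_hat, y):
    # Move tensors to NumPy and scale [0, 1] floats to 8-bit pixels.
    pred = (y_hat.detach().cpu().numpy() * 255).astype(np.uint8)
    target = (y.detach().cpu().numpy() * 255).astype(np.uint8)
    # One frame per predicted time step: prediction next to ground truth.
    return [np.hstack([pred[0, t], target[0, t]])
            for t in range(pred.shape[1])]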
Example #5
    dnn_features += ["m_ggj_", "m_jjj_"]

if do_dnn:
    print("Calculating dnn scores")
    print(len(dnn_features))
    print([feat for feat in dnn_features])
    i = 0
    print(dnn_features)
    for dnn_model in dnn_models:
        with open(dnn_model, "r") as f_in:
            model = json.load(f_in)

        dnn_features_data = dnn_helper.DNN_Features(
            name='data',
            global_features=utils.create_array(features_data, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_data["objects_"]),
                model["preprocess_scheme"]))
        #print dnn_features_data.global_features
        #print dnn_features_data.objects
        #print dnn_features_data.features
        dnn_features_validation = dnn_helper.DNN_Features(
            name='validation',
            global_features=utils.create_array(features_validation,
                                               dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_validation["objects_"]),
                model["preprocess_scheme"]))
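
Examples #5 through #8 share the four-argument form `utils.create_array(features, feature_names, preprocess_scheme, z_score)`, which appears to column-stack the named branches and optionally z-score them with stored statistics (see the `mean`/`std_dev` dict built in example #8). A minimal sketch under that reading, not the actual utility:

import numpy as np

def create_array(features, feature_names, preprocess_scheme, z_score):
    # Stack the named branches into an (n_events, n_features) matrix,
    # optionally standardizing each column with the stored statistics
    # so train, validation, and data samples share one preprocessing.
    columns = []
    for name in feature_names:
        column = np.asarray(features[name], dtype=np.float64)
        if z_score:
            stats = preprocess_scheme[name]
            column = (column - stats["mean"]) / stats["std_dev"]
        columns.append(column)
    return np.column_stack(columns)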
Example #6
    with open(preprocess_scheme) as f_in:
        preprocess_scheme = json.load(f_in)

print("Preprocessing scheme: ", preprocess_scheme)

dnn_branches = []

if do_dnn:
    print("Calculating dnn scores")
    print(len(dnn_features))
    print([feat for feat in dnn_features])
    i = 0
    for dnn_model in dnn_models:
        with open(dnn_model, "r") as f_in:
            model = json.load(f_in)
        dnn_features_data = dnn_helper.DNN_Features(
            name='data',
            global_features=utils.create_array(features_data, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_data["objects_"]),
                model["preprocess_scheme"]))
        dnn_features_validation = dnn_helper.DNN_Features(
            name='test',
            global_features=utils.create_array(features_test, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_test["objects_"]),
                model["preprocess_scheme"]),
            lumi=features_test["lumi_"],
            run=features_test["run_"],
            evt=features_test["evt_"])
        dnn_features_train = dnn_helper.DNN_Features(
            name='train',
            global_features=utils.create_array(features_train, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_train["objects_"]),
                model["preprocess_scheme"]))

        dnn = dnn_helper.DNN_Helper(features_validation=dnn_features_validation,
                                    features_train=dnn_features_train,
                                    features_data=dnn_features_data,
                                    metadata=model,
                                    weights_file="dnn_weights/" + model["weights"],
                                    train_mode=False)
        dnn_predictions.append(dnn.predict(debug=True))
        training_features.append("dnn_score_%d" % i)
        dnn_branches.append("dnn_score_%d" % i)
        i += 1

print(dnn_predictions)

preprocess_dict = {}
if args.z_score:
    for feat in training_features:
        mean, std = utils.get_mean_and_std(features_train[feat])
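
The `args.z_score` branch pairs `utils.get_mean_and_std` with the `preprocess_dict` consumed by `create_array`. A minimal sketch of that helper, assuming plain per-feature standardization statistics:

import numpy as np

def get_mean_and_std(values):
    # Per-feature statistics backing the z-score preprocess_dict, e.g.
    # preprocess_dict[feat] = {"mean": float(mean), "std_dev": float(std)}
    values = np.asarray(values, dtype=np.float64)
    return values.mean(), values.std()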
Example #7
#if args.do_top_tag:
#    dnn_features += ["top_tag_score_"]
if (args.fcnc_hut or args.fcnc_hct) and args.channel == "Hadronic" and not args.no_mass_constraint:
    dnn_features += ["m_ggj_", "m_jjj_"] 

if do_dnn:
    print("Calculating dnn scores")
    print(len(dnn_features))
    print([feat for feat in dnn_features])
    i = 0
    print(dnn_features)
    for dnn_model in dnn_models:
        with open(dnn_model, "r") as f_in:
            model = json.load(f_in)

        dnn_features_data = dnn_helper.DNN_Features(
            name='data',
            global_features=utils.create_array(features_data, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_data["objects_"]),
                model["preprocess_scheme"]))
        #print dnn_features_data.global_features
        #print dnn_features_data.objects
        #print dnn_features_data.features
        dnn_features_validation = dnn_helper.DNN_Features(
            name='validation',
            global_features=utils.create_array(features_validation,
                                               dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_validation["objects_"]),
                model["preprocess_scheme"]))
        dnn_features_final_fit = dnn_helper.DNN_Features(
            name='final_fit',
            global_features=utils.create_array(features_final_fit,
                                               dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_final_fit["objects_"]),
                model["preprocess_scheme"]))
        dnn_features_train = dnn_helper.DNN_Features(
            name='train',
            global_features=utils.create_array(features, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features["objects_"]),
                model["preprocess_scheme"]))

        dnn = dnn_helper.DNN_Helper(features_validation=dnn_features_validation,
                                    features_train=dnn_features_train,
                                    features_data=dnn_features_data,
                                    features_final_fit=dnn_features_final_fit,
                                    metadata=model,
                                    weights_file="dnn_weights/" + model["weights"],
                                    train_mode=False)
        #dnn.predict()
        #dnn_predictions.append([dnn.predictions["train"], dnn.predictions["validation"], dnn.predictions["data"]])
        dnn_predictions.append(dnn.predict(debug=True))
        feature_names.append("dnn_score_%d" % i)
        i += 1

print(dnn_predictions)
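
Examples #5 through #7 also depend on `dnn_helper.DNN_Features`, whose definition is not shown. A hypothetical container with only the fields these call sites pass might look like:

class DNN_Features:
    # Hypothetical bundle mirroring the keyword arguments used above;
    # the project's real class may carry more logic.
    def __init__(self, name, global_features, objects,
                 lumi=None, run=None, evt=None):
        self.name = name
        self.global_features = global_features  # (n_events, n_features)
        self.objects = objects                  # padded per-object arrays
        self.lumi, self.run, self.evt = lumi, run, evt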
Example #8
        feature_names.remove(name)

print("Here are the ordered global features:", feature_names)

if args.z_score:
    preprocess_dict = {}
    for feature in feature_names:
        # Only scalar global features get z-scored; the per-object
        # (variable-length) branches are padded separately below.
        if ("objects_" not in feature and feature != "leptons_"
                and feature != "jets_"):
            mean, stddev = utils.get_mean_and_std(features[feature])
            preprocess_dict[feature] = {
                "mean": float(mean),
                "std_dev": float(stddev)
            }

global_features = utils.create_array(features, feature_names, preprocess_dict,
                                     args.z_score)
global_features_validation = utils.create_array(features_validation,
                                                feature_names, preprocess_dict,
                                                args.z_score)
global_features_data = utils.create_array(features_data, feature_names,
                                          preprocess_dict, args.z_score)
global_features_final_fit = utils.create_array(features_final_fit,
                                               feature_names, preprocess_dict,
                                               args.z_score)

object_features = utils.pad_array(object_features)
object_features_validation = utils.pad_array(object_features_validation)
object_features_data = utils.pad_array(object_features_data)
object_features_final_fit = utils.pad_array(object_features_final_fit)
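
`utils.pad_array` accompanies every `create_array` call on the `objects_` branches, presumably to turn ragged per-event object lists into one dense array. A minimal sketch under that assumption (padding value and layout are guesses):

import numpy as np

def pad_array(object_lists, max_objects=None, pad_value=0.0):
    # Pad each event's variable-length object list to a common length
    # so the result can be fed to the network as one dense array.
    if max_objects is None:
        max_objects = max(len(objs) for objs in object_lists)
    n_features = len(object_lists[0][0])
    padded = np.full((len(object_lists), max_objects, n_features), pad_value)
    for i, objs in enumerate(object_lists):
        for j, obj in enumerate(objs[:max_objects]):
            padded[i, j] = obj
    return padded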

if args.z_score: