Code example #1
def handler(event, context):
    """
    Entry point for the Lambda function.
    :param event: the Lambda event
    :param context: the Lambda context
    :return: None
    """

    print(f"'event': {event}")
    print(f"'context': {context}")

    # -----------------------------------------------------
    # EXTRACT

    # define ny_dataset
    ny_dataset = classes.Dataset("ny_dataset")
    ny_dataset.headers_all = ["date", "cases", "deaths"]
    ny_dataset.headers_key = ny_dataset.headers_all
    ny_dataset.match_field = "date"
    ny_dataset.source_url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us.csv"

    # extract and print ny_dataset
    ny_dataset.df = extract.extract(ny_dataset.source_url)
    print(f"'ny_dataset.df':\n{ny_dataset.df}")

    # define jh_dataset
    jh_dataset = classes.Dataset("jh_dataset")
    jh_dataset.headers_all = [
        "Date", "Country/Region", "Province/State", "Lat", "Long", "Confirmed",
        "Recovered", "Deaths"
    ]
    jh_dataset.headers_key = ["Date", "Country/Region", "Recovered"]
    jh_dataset.match_field = "Date"
    jh_dataset.source_url = \
        "https://raw.githubusercontent.com/datasets/covid-19/master/data/time-series-19-covid-combined.csv"

    # extract and print jh_dataset
    jh_dataset.df = extract.extract(jh_dataset.source_url,
                                    jh_dataset.headers_key, "Country/Region",
                                    "US")
    print(f"'jh_dataset.df':\n{jh_dataset.df}")

    # -----------------------------------------------------
    # TRANSFORM

    # transform the datasets into CovidStat Instances
    covid_stats = transform.transform(ny_dataset, jh_dataset)

    # print CovidStats
    print(*covid_stats, sep="\n")

    # -----------------------------------------------------
    # LOAD

    # load CovidStat instances into the CovidStats DynamoDB table
    load.load_all(classes.CovidStat, covid_stats)
    load.load_json(covid_stats)
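Note: the classes, extract, transform and load modules used above are project-specific and not shown here. As a rough, hypothetical sketch of what the extract step might do (assuming pandas; the signature is only inferred from the two calls above and is not the project's actual API):

import pandas as pd

def extract(source_url, headers_key=None, filter_field=None, filter_value=None):
    """Hypothetical sketch: read a CSV from a URL, optionally filter rows on
    one column and keep only the key columns."""
    df = pd.read_csv(source_url)
    if filter_field is not None:
        df = df[df[filter_field] == filter_value]
    if headers_key is not None:
        df = df[headers_key]
    return df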
Code example #2
def main(trace_directory, trace_name, number):
    trace_directory = abspath(trace_directory)
    ptfile = '%s/pt_%s.txt' % (trace_directory, trace_name)

    pt_data = load_all(ptfile, number=number)[:-1]
    textin = np.array(pt_data)
    traces = load_traces('%s/avg_%s' % (trace_directory, trace_name), number)

    assert traces.shape[0] == textin.shape[0]

    with open('tracefile', 'wb') as f:
        for t in traces.flatten():
            f.write(struct.pack('f', t))

    with open('plaintext', 'wb') as f:
        for t in textin.flatten():
            f.write(struct.pack('B', t))

    print("Add following to CONFIG:\n")
    print("[Traces]")
    print("files=1")
    print("trace_type=f")
    print("transpose=true")
    print("index=0")
    print("nsamples=%d" % traces.shape[1])
    print("trace=tracefile %d %d" % (traces.shape[0], traces.shape[1]))
    print("")
    print("[Guesses]")
    print("files=1")
    print("guess_type=u")
    print("transpose=true")
    print("guess=plaintext %d %d" % (textin.shape[0], textin.shape[1]))
Code example #3
def feature_aegan(aegan, modelname, protoname):
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix=modelname)

        x = transform(load_all(protoname, (npxw, npxh)))
        code = aegan.autoencoder.encoder.predict(x)

    ipdb.set_trace()
Code example #4
    def test_load(self):
        """
        :return: pass or fail if the ...
        """
        print("test_load")
        size = 25
        sample_date = list()
        for _ in range(size):
            sample_date.append(create_random_instance())

        load_response = load.load_all(CovidStatTest, sample_date)
        self.assertEqual(len(load_response), size)
Code example #5
def cli(data_path, num_traces, start_point, end_point, plot, num_key_bytes,
        bruteforce, name):
    """
    Run an attack against previously collected traces.

    Each attack is a separate subcommand. The options to the top-level command
    apply to all attacks; see the individual attacks' documentation for
    attack-specific options.
    """
    global PLOT, NUM_KEY_BYTES, BRUTEFORCE, PLAINTEXTS, TRACES, KEYFILE
    PLOT = plot
    NUM_KEY_BYTES = num_key_bytes
    BRUTEFORCE = bruteforce
    PLAINTEXTS = load_all(path.join(data_path, 'pt_%s.txt' % name))
    TRACES = load_traces(path.join(data_path, 'avg_%s' % name),
                         num_traces or len(PLAINTEXTS),
                         start_point, end_point)
    KEYFILE = path.join(data_path, 'key_%s.txt' % name)
Code example #6
    for index, time in enumerate(times[1:], start=1):
        diff = time - times[index - 1]
        # 30 minutes for one view
        if diff > 30 * 60 * 1000 or index == len(times) - 1:
            blockTimes.append(times[index - 1] - blockBegin)
            spentTime += times[index - 1] - blockBegin
            blockBegin = time
            blocks += 1
            blockStop.append(index - 1)

    print(f'{name}: {len(times)} entries, {blocks} blocks')
    print(f'Block avg: {displayTime(np.average(blockTimes))}')
    print(f'Entry avg: {displayTime(np.average(spentTime//len(times)))}')
    print(f'Total:     {displayTime(spentTime)}')
    print(f'Skips:     {skip:>7}')

    return spentTime


dataAll = load_all(args.rating_dir)
total = 0
for (name, data) in dataAll.items():
    total += time_single(name, data)
    print()

print(f'Total:')
print(f'Time:       {displayTime(total)}')
print(f'Entries:    {totalEntries:>7}')
print(f'Avg.:       {displayTime(total/totalEntries)}')
print(f'Model avg.: {displayTime(total/totalEntries/13)}')
print(f'Faulty:     {totalFaulty:>7}')
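displayTime, load_all and the running totals (spentTime, totalEntries, totalFaulty, skip) are defined in parts of the file this snippet omits. A hypothetical displayTime, assuming the timestamps are in milliseconds (suggested by the 30 * 60 * 1000 threshold above), could look like:

def displayTime(ms):
    # Hypothetical helper: format a millisecond count as h:mm:ss.
    seconds = int(ms) // 1000
    hours, rem = divmod(seconds, 3600)
    minutes, secs = divmod(rem, 60)
    return f'{hours}:{minutes:02d}:{secs:02d}'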
Code example #7
def tra_create(template_dir, num_pois, poi_spacing):
    """
    Template Radio Analysis; create a template.

    The data set should have a considerable size in order to allow for the
    construction of an accurate model. In general, the more data is used for
    template creation the less is needed to apply the template.

    The template directory is where we store multiple files comprising the
    template; beware that existing files will be overwritten!
    """
    try:
        os.makedirs(template_dir)
    except OSError:
        # Checking the directory before attempting to create it leads to race
        # conditions.
        if not path.isdir(template_dir):
            raise

    if PLOT:
        plt.plot(np.average(TRACES,axis=0),'b')
        plt.show()

    tempKey = load_all(KEYFILE)
    fixed_key = (np.shape(tempKey)[0] == 1)
 
    for knum in range(NUM_KEY_BYTES):
        if fixed_key:
            tempSbox = [sbox[PLAINTEXTS[i][knum] ^ tempKey[0][knum]] for i in range(len(TRACES))]
        else:
            tempSbox = [sbox[PLAINTEXTS[i][knum] ^ tempKey[i][knum]] for i in range(len(TRACES))]

        tempHW = [hw[s] for s in tempSbox]
        
        # Sort traces by HW
        # Make 9 blank lists - one for each Hamming weight
        tempTracesHW = [[] for _ in range(9)]
        
        # Fill them up
        for i, trace in enumerate(TRACES):
            HW = tempHW[i]
            tempTracesHW[HW].append(trace)

        # Check to have at least a trace for each HW
        for HW in range(9):
            assert len(tempTracesHW[HW]) != 0, "No trace with HW = %d, try increasing the number of traces" % HW

        # Switch to numpy arrays
        tempTracesHW = [np.array(tempTracesHW[HW]) for HW in range(9)]

        # Find averages
        tempMeans = np.zeros((9, len(TRACES[0])))
        for i in range(9):
            tempMeans[i] = np.average(tempTracesHW[i], 0)

        # Find sum of differences
        tempSumDiff = np.zeros(len(TRACES[0]))
        for i in range(9):
            for j in range(i):
                tempSumDiff += np.abs(tempMeans[i] - tempMeans[j])
        
        if PLOT:
            plt.plot(tempSumDiff,label="subkey %d"%knum)
            plt.legend()

        # Find POIs
        POIs = []
        for i in range(num_pois):
            # Find the max
            nextPOI = tempSumDiff.argmax()
            POIs.append(nextPOI)
            
            # Make sure we don't pick a nearby value
            poiMin = max(0, nextPOI - poi_spacing)
            poiMax = min(nextPOI + poi_spacing, len(tempSumDiff))
            for j in range(poiMin, poiMax):
                tempSumDiff[j] = 0

        # Fill up mean and covariance matrix for each HW
        meanMatrix = np.zeros((9, num_pois))
        covMatrix  = np.zeros((9, num_pois, num_pois))
        for HW in range(9):
            for i in range(num_pois):
                # Fill in mean
                meanMatrix[HW][i] = tempMeans[HW][POIs[i]]
                for j in range(num_pois):
                    x = tempTracesHW[HW][:,POIs[i]]
                    y = tempTracesHW[HW][:,POIs[j]]
                    covMatrix[HW,i,j] = cov(x, y)

        with open(path.join(template_dir, 'POIs_%d' % knum), 'wb') as fp:
            pickle.dump(POIs, fp)
        with open(path.join(template_dir, 'covMatrix_%d' % knum), 'wb') as fp:
            pickle.dump(covMatrix, fp)
        with open(path.join(template_dir, 'meanMatrix_%d' % knum), 'wb') as fp:
            pickle.dump(meanMatrix, fp)

    if PLOT:
        plt.show()
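tra_create only builds and pickles the template (POIs plus per-Hamming-weight means and covariances); the matching phase is not shown in this example. As a hedged sketch of how such a template is typically applied to attack traces (a standard template-attack step, not necessarily this project's implementation):

import pickle
import numpy as np
from os import path
from scipy.stats import multivariate_normal

def tra_match_byte(template_dir, knum, atk_traces, atk_plaintexts, sbox, hw):
    # Load the pickled template for key byte knum.
    with open(path.join(template_dir, 'POIs_%d' % knum), 'rb') as fp:
        POIs = pickle.load(fp)
    with open(path.join(template_dir, 'meanMatrix_%d' % knum), 'rb') as fp:
        meanMatrix = pickle.load(fp)
    with open(path.join(template_dir, 'covMatrix_%d' % knum), 'rb') as fp:
        covMatrix = pickle.load(fp)

    # For every key-byte guess, accumulate the log-likelihood of the observed
    # POI samples under the Gaussian template of the predicted Hamming weight.
    scores = np.zeros(256)
    for j, trace in enumerate(atk_traces):
        sample = trace[POIs]
        for guess in range(256):
            HW = hw[sbox[atk_plaintexts[j][knum] ^ guess]]
            rv = multivariate_normal(meanMatrix[HW], covMatrix[HW], allow_singular=True)
            scores[guess] += rv.logpdf(sample)
    return int(scores.argmax())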
Code example #8
File: crf.py  Project: timforby/dnet
import os
import sys
import numpy as np
import argparse
from scipy import misc
import load, proc, densecrf
import classes as cl

ar = argparse.ArgumentParser()
ar.add_argument('path1', type=str, help="Path to label images")
args = ar.parse_args()
path1 = args.path1

path1 = path1 + '/'
path2 = '../../data/real/'
result = path1 + 'crf/'
if not os.path.exists(result):
    os.makedirs(result)
labels = load.load_data_simp([path1])
set_imgs = load.load_all(path2, comp=True)
x_img = set_imgs[1]
x_img = proc._join(x_img, set_imgs[2:])

for i in range(len(x_img)):
    x_i = x_img[i]
    label = labels[0][i]
    crflabel = densecrf.crf(label, x_i, classes=2, ls=[0, 3])
    crflabel = cl.declassimg2(crflabel, map=True)
    number = str(i).zfill(4)
    misc.imsave(result + number + ".png", crflabel)
Code example #9
    robot_data_list = list()

    for marker in robot_markers:
        marker_data = data_by_marker_name(marker, data, metadata)
        robot_data_list.append(marker_data)
    robot_data = np.stack(robot_data_list, axis=2)
    robot_data = np.nanmean(robot_data, axis=2)

    human_data = data_by_marker_name("UsersHead", data, metadata)

    distance = np.linalg.norm(robot_data - human_data, axis=1)
    return distance


if __name__ == "__main__":
    by_id, by_position = load_all("../tsv_files/*.tsv")

    #plot comfort
    #"""
    def plot_comfort(trial):
        metadata = trial["metadata"]
        data = trial["data"]
        eps = 1e-8

        comfort = get_comfort(data, metadata)
        plt.plot(comfort)

    user_data = by_id[4]
    for position in ["A", "B", "C", "D", "E"]:
        plot_comfort(user_data[position])
    plt.show()
Code example #10
File: test_edge.py  Project: timforby/dnet
        loc += 2
        
    for p in range(ps[1],x_i.shape[1]//2,ps[1]):#vertup
        result_img[0:ps[0],p:p+ps[1],:] = result[loc]
        result_img[-ps[0]:,p:p+ps[1],:] = result[loc+1]
        loc += 2
        
    for p in range(x_i.shape[1]-ps[1],x_i.shape[1]//2,-ps[1]):#vertup
        result_img[0:ps[0],p-ps[1]:p,:] = result[loc]
        result_img[-ps[0]:,p-ps[1]:p,:] = result[loc+1]         
        loc += 2
    return result_img
    
#----Load Data---

set_imgs = load.load_all(REAL_PATH,comp=comp)
print("Loading done")
print("Pre-processing")
x_img = set_imgs[1]
y_img = set_imgs[0]
x_img = proc._join(x_img,set_imgs[2:])  
_d = x_img[0].shape[2]

#hnv_imgs = load.load_data_simp([REAL_PATH+'hnv_ng'])
#x_img = proc._join(x_img,hnv_imgs[0:1],depth=2)


model = load_model(TRAIN_PATH+'/model.hdf5',compile=False)
print("Model loaded")

_w = patch_size[0]
Code example #11
File: aegan_reid.py  Project: GRSEB9S/Keras-GAN-2
            plt.savefig('test/{}.pdf'.format(ind))
            plt.clf()

    ipdb.set_trace()


if __name__ == '__main__':
    nbatch = 128
    nmax = nbatch * 100
    npxw, npxh = 64, 128

    from load import people, load_all
    va_data, tr_stream, _ = people(pathfile='protocol/cuhk01-train.txt',
                                   size=(npxw, npxh),
                                   batch_size=nbatch)
    allx = transform(load_all('protocol/cuhk01-train.txt', (npxw, npxh)))

    g = Generator(g_size=(3, npxh, npxw),
                  g_nb_filters=128,
                  g_nb_coding=5000,
                  g_scales=4,
                  g_init=InitNormal(scale=0.002))  #, g_FC=[5000])
    d = Discriminator(d_size=g.g_size,
                      d_nb_filters=128,
                      d_scales=4,
                      d_init=InitNormal(scale=0.002))  #, d_FC=[5000])

    # init with autoencoder
    ae = Autoencoder(g, d)
    #   ae.fit(tr_stream,
    #           save_dir='./samples/reid_aegan_5000/ae/',
Code example #12
#!/usr/bin/env python3

from load import load_all
import numpy as np

raise Exception("This file does not work with the new Pandas update.")

data = load_all()

modelCDTn = 'CUNI-DocTransformer'
modelCTTn = 'CUNI-T2T-2018'
modelREFn = 'ref'

for (userKey, user) in data.items():
    for (docKey, document) in user.items():
        for (lineKey, line) in document.items():

            modelCDT = line[modelCDTn]
            modelCTT = line[modelCTTn]
            modelREF = line[modelREFn]
            if 'fluency' not in modelCDT or 'adequacy' not in modelCDT:
                continue
            if 'fluency' not in modelCTT or 'adequacy' not in modelCTT:
                continue
            if 'fluency' not in modelREF or 'adequacy' not in modelREF:
                continue
            scoreCDT = float(modelCDT['fluency']) * float(modelCDT['adequacy'])
            scoreCTT = float(modelCTT['fluency']) * float(modelCTT['adequacy'])
            scoreREF = float(modelREF['fluency']) * float(modelREF['adequacy'])

            if scoreCDT > scoreREF + 0.5 and scoreCDT > scoreCTT + 0.5:
Code example #13
File: tester.py  Project: timforby/dnet
def setup(device, name, interval, patch_size, comp, labels):

    TRAIN_PATH = OUT_PATH + name
    if not os.path.exists(TRAIN_PATH):
        print("Model does not exist")
        sys.exit(0)

    append = '_test' if not comp else '_test_comp'
    RESULT_PATH_BASE = TRAIN_PATH + append
    '''
    if os.path.exists(RESULT_PATH_BASE):
        i = 0
        while os.path.exists(RESULT_PATH_BASE):
            RESULT_PATH_BASE = TRAIN_PATH+append+'_'+str(i)
            i += 1
    '''
    if not os.path.exists(RESULT_PATH_BASE):
        os.makedirs(RESULT_PATH_BASE)
    RESULT_PATH_BASE = RESULT_PATH_BASE + '/' + str(interval)

    os.makedirs(RESULT_PATH_BASE)
    os.makedirs(RESULT_PATH_BASE + '/crf')
    os.makedirs(RESULT_PATH_BASE + '/avg')

    #----KERAS ENV-------

    os.environ["THEANO_FLAGS"] = 'device=' + device
    sys.setrecursionlimit(50000)

    import load, proc, classes, math
    from keras.models import Model, load_model

    #----Load Data---
    set_imgs = load.load_all(REAL_PATH, comp=comp)
    print("Loading done")
    print("Pre-processing")
    x_img = set_imgs[1]
    y_img = set_imgs[0]
    x_img = proc._join(x_img, set_imgs[2:])
    _d = x_img[0].shape[2]

    #hnv_imgs = load.load_data_simp([REAL_PATH+'hnv_ng'])
    #x_img = proc._join(x_img,hnv_imgs[0:1],depth=2)

    model = load_model(TRAIN_PATH + '/model.hdf5', compile=False)
    print("Model loaded")

    _w = patch_size[0]
    _h = patch_size[1]
    _w_out = _w
    out_depth = None
    ranges = range(-(patch_size[0] // 2), (patch_size[0] // 2), interval)
    #ranges = range(0,(patch_size[0]//2),interval)
    offsets = []
    for xoff in ranges:
        for yoff in ranges:
            if yoff == xoff:
                offsets.append((xoff, yoff))

    i = -1
    '''for x_i in x_img:
Code example #14
def generate_dataset():
    by_id, by_position = load_all("../tsv_files/*.tsv")

    positions = ["A", "B", "C", "D", "E"]
    dataset = np.empty((0, 10))  # 10 columns: frame, robot x/y, distance, human x/y, height, comfort, position label, user label
    for user, data in enumerate(by_id):
        for position, trial in data.items():
            metadata = trial["metadata"]
            raw_data = trial["data"]
            eps = 1e-8

            #frame counter
            frames = raw_data[:, 0] - raw_data[0, 0]

            #robot position
            robot_markers = [
                "DoubleBottom", "DoubleFaceBottomRight", "DoubleFaceTopRight",
                "DoubleFaceTopLeft", "DoubleFaceBottomLeft"
            ]
            marker_positions = list()
            for marker in robot_markers:
                try:
                    marker_position = np.stack(marker_projection(
                        marker, raw_data, metadata),
                                               axis=1)
                except TypeError:
                    print(user, position, metadata)
                    raise
                marker_positions.append(marker_position)
            marker_positions = np.stack(marker_positions, axis=2)
            robot_pos = np.nanmean(marker_positions, axis=2)

            #participant's comfort during the approach (lower is better)
            comfort = get_comfort(raw_data, metadata)

            #participant's height
            height = participant_height(raw_data, metadata)
            if not isinstance(height, np.ndarray):
                # height is 0.0 because head markers couldn't be found
                height = np.array([np.nan] * robot_pos.shape[0])

            #human position
            human_pos = np.stack(marker_projection("UsersHead", raw_data,
                                                   metadata),
                                 axis=1)

            # distance -- euclidean distance
            distance = get_distance(raw_data, metadata)

            #position and user label
            pos_label = np.array([positions.index(position)] *
                                 robot_pos.shape[0])
            user_label = np.array([user] * robot_pos.shape[0])

            #stack the data together
            data_rows = [
                frames, robot_pos[:, 0], robot_pos[:, 1], distance,
                human_pos[:, 0], human_pos[:, 1], height, comfort, pos_label,
                user_label
            ]
            partial_dataset = np.stack(data_rows, axis=1)
            dataset = np.append(dataset, partial_dataset, axis=0)
    return dataset
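A minimal usage sketch (assumed, not part of the original file): build the combined array once and persist it with numpy for downstream analysis.

if __name__ == "__main__":
    dataset = generate_dataset()
    # Hypothetical output file name; the columns are documented where dataset is allocated.
    np.save("approach_dataset.npy", dataset)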
Code example #15
    train_val_splitting
from define_metrics import Metrics_DDI_classification, multi_micro_f1_, \
    get_callbacks_list
from utils import argparser, combine_params
from network_functions import define_network, train_network, get_predictions
from calculate_metrics import get_metrics_cv

if __name__ == '__main__':

    in_path, out_path, out_path_figure = argparser()
    #in_path = '/content/gdrive/My Drive/Master/Subjects/TFM/code/DOC/two_steps/second_step/'

    ## 1. Load embedding, position matrices and word2int_dict of train set,
    # train and test sets
    emb_matrix, pos_matrix, w2i_dict, VOCAB_SIZE, WV_VECTOR_SIZE, POS_VECTOR_LENGTH, \
        MAX_SENTENCE_LENGTH, X_train, y_train, X_test, y_test = load_all(in_path)

    ## 2. Preprocessing
    label2int = {'INT': 0, 'ADVISE': 1, 'EFFECT': 2, 'MECHANISM': 3}
    X_train_i_pad, X_train_p1_pad, X_train_p2_pad, y_train_1h, y_train_i = \
        prepro(X_train,y_train, w2i_dict, label2int, MAX_SENTENCE_LENGTH)
    X_test_i_pad, X_test_p1_pad, X_test_p2_pad, y_test_1h, y_test_i = \
        prepro(X_test,y_test, w2i_dict, label2int, MAX_SENTENCE_LENGTH,
               test=True, y_trainset=y_train)
    original_train_data = y_train_i, y_train_1h, X_train_i_pad, X_train_p1_pad, X_train_p2_pad

    ## 3. Define metrics
    #out_path = "/content/gdrive/My Drive/Master/Subjects/TFM/code/DOC/two_steps/second_step/new/"
    metrics_callbacks = Metrics_DDI_classification()

    ## 4. Define network parameters