Code Example #1
File: app.py Project: lsingh123/ia_webmeter_viz
def make_collection_viz(collection, date):
    session = requests.Session()
    response = session.get(
        'http://localhost:8300/dump/{date}'.format(date=date))
    json = response.json()

    try:
        newscrawl = json['collection'][collection]
    except KeyError:
        error = "ERROR: {coll} NOT FOUND ".format(coll=collection)
        return render_template("error.html", error=error)

    try:
        domains = make_data(newscrawl['domains'], "domain", "size")
        mimetypes = make_data(newscrawl['mimetypes'], "mimetype", "size")
        statuscodes = make_data(newscrawl['statuscodes'], "statuscode", "size")
        hovers = [create_hover_tool('size_human') for i in range(3)]

        plot1 = create_bar_chart(
            domains,
            date + " Domains for Collection {coll}".format(coll=collection),
            "domain",
            "size",
            "Domain",
            "Captures",
            hover_tool=hovers[0])
        plot2 = create_bar_chart(
            mimetypes,
            date + " Mimetypes for Collection {coll}".format(coll=collection),
            "mimetype",
            "size",
            "Mimetype",
            "Captures",
            hover_tool=hovers[1],
            width=700)
        plot3 = create_bar_chart(
            statuscodes,
            date +
            " Statuscodes for Collection {coll}".format(coll=collection),
            "statuscode",
            "size",
            "Statuscodes",
            "Captures",
            hover_tool=hovers[2],
            width=700)
        plots = [components(plot1), components(row(plot2, plot3))]

        divs = [{"div": div, "script": script} for script, div in plots]

        return render_template("viz.html",
                               collection=collection,
                               divs=divs,
                               date=date,
                               collections=colls)
    except Exception:
        error = "ERROR IN GETTING DATA FOR {coll}".format(coll=collection)
        return render_template("error.html", error=error)
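For context, a minimal sketch of how a view function like this might be wired into a Flask route; the app object, URL pattern, and route name are assumptions, not part of the original project.

# Hypothetical wiring only: app, the route, and its URL pattern are assumed.
from flask import Flask

app = Flask(__name__)

@app.route('/viz/<collection>/<date>')
def collection_viz(collection, date):
    return make_collection_viz(collection, date)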
Code Example #2
def send_list(s_socket, CID, address):
    global client_table
    #print(client_table)
    for key in client_table:
        data = utils.make_data(0, [key, client_table[key][0]])
        s_socket.send_data(address, data)

    for key in client_table:
        #print(key, client_table[key][0])
        data = utils.make_data(0, [key, client_table[key][0]])
        #print(address)
        s_socket.send_data(address, data)
Code Example #3
File: run.py Project: mitsuhiko-nozawa/atmacup_08
    def __call__(self):
        module = make_data(self.param["common_param"])
        module = self.Prepro(module)
        module = self.Train(module)
        module = self.Infer(module)
        self.Logger(module)
        print("success")
Code Example #4
def send_msg(socket, address, cid, msg):
    #print('addr',address[0],address[1])
    global client_table
    if cid not in client_table:
        return
    msg = ''.join(msg)
    #print('msg', msg)
    data = utils.make_data(2, [cid, msg])
    socket.send_data(address, data)
Code Example #5
def send_CID(s_socket, mode, msg):
    # mode
    # 0 : created
    # 1 : removed
    # msg = [CID, Address]
    global client_table
    data = utils.make_data(mode, msg)
    for key in client_table:
        s_socket.send_data(client_table[key][0],
                           data)  # client_table[key] : [adress, timer]
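The mode/payload convention documented in these socket examples suggests that utils.make_data serializes a [mode, payload] pair into bytes for send_data. A purely illustrative sketch under that assumption (the project's real implementation and wire format are not shown here):

# Illustrative only: the JSON wire format is an assumption.
import json

def make_data(mode, payload):
    # mode: integer opcode (e.g. 0 = CID created, 1 = CID removed)
    # payload: e.g. [CID, address] or a plain string
    return json.dumps([mode, payload]).encode('utf-8')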
Code Example #6
def load_data(data_dir, dataset='mnist'):
    if dataset == 'mnist':
        from tensorflow.examples.tutorials.mnist import input_data
        data = input_data.read_data_sets(data_dir, one_hot=True)

        data.test.cls = np.argmax(data.test.labels, axis=1)
    else:
        cifar10.data_path = data_dir
        cifar10.maybe_download_and_extract()

        data = make_data(cifar10)

    return data
Code Example #7
def send_alive(c_socket, server_address):
    global exit_flag
    data = utils.make_data(3, clientID)
    while True:
        # wait for 10s
        for i in range(10):
            if exit_flag == 1:
                break
            time.sleep(1)
        if exit_flag == 1:
            break
        # send client is alive
        c_socket.send_data(server_address, data)
    print("send_alive thread terminated")
Code Example #8
File: train_pl.py Project: ankur56/ELFNet
    def setup(self, stage=None):
        X_train, X_test, y_train, y_test = utils.make_data(ch=self.ch,
                                                           path=self.path,
                                                           k=self.k)

        X_train_tensor = torch.Tensor(X_train)
        X_test_tensor = torch.Tensor(X_test)
        y_train_tensor = torch.Tensor(y_train)
        y_train_tensor = y_train_tensor.unsqueeze(1)
        y_test_tensor = torch.Tensor(y_test)
        y_test_tensor = y_test_tensor.unsqueeze(1)

        self.train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
        self.val_dataset = TensorDataset(X_test_tensor, y_test_tensor)
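A hedged sketch of the companion DataLoader hooks such a LightningDataModule typically defines right after setup, assuming torch.utils.data.DataLoader is imported; self.batch_size is an assumed attribute not shown in the excerpt.

    # Hedged sketch: companion hooks in the same LightningDataModule.
    # self.batch_size is an assumed attribute not shown in the excerpt.
    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False)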
Code Example #9
File: partC.py Project: spsina/ai_hw4
    # load the testing set, for later use
    x_test, y_test, labels_test = load(data_set_file)

    # calculate the accuracy
    N = len(labels_test)
    hit = 0

    for i in range(N):
        if decision_boundary(predict(np.array([1, x_test[i], y_test[i]]), weights)) == bool(labels_test[i]):
            hit += 1

    return hit / N * 100


# make training and test data randomly chosen from original dataset
make_data()

# load the training set
x, y, labels = load("training_set.data")

# class A indexes
class_A = [i for i in range(len(labels)) if labels[i] == value[A]]
# class B indexes
class_B = [i for i in range(len(labels)) if labels[i] == value[B]]

features = np.array([[1, x[i], y[i]] for i in range(len(labels))])
labels = np.array(labels)

# train on data
initial_weights = np.zeros(3)
weights, history = train(features, labels, initial_weights, 0.1, 100000)
Code Example #10
File: generate_data.py Project: josh-gree/poly2poly
from utils import make_data
import sys
import os
import h5py

train_path = '../../data/processed/train/'
val_path = '../../data/processed/val/'
test_path = '../../data/processed/test/'

os.mkdir(train_path)
os.mkdir(val_path)
os.mkdir(test_path)

# make train
for i in range(500):
    recon, min_diff, mid_energy = make_data()
    f = h5py.File(train_path + "{}.hdf5".format(i), "w")
    f.create_dataset('recon', data=recon)
    f.create_dataset('min_diff', data=min_diff)
    f.create_dataset('mid_energy', data=mid_energy)
    f.close()

# make validation
for i in range(100):
    recon, min_diff, mid_energy = make_data()
    f = h5py.File(val_path + "{}.hdf5".format(i), "w")
    f.create_dataset('recon', data=recon)
    f.create_dataset('min_diff', data=min_diff)
    f.create_dataset('mid_energy', data=mid_energy)
    f.close()
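To read one of the generated files back, a minimal sketch assuming the dataset names created above:

import h5py

with h5py.File(train_path + "0.hdf5", "r") as f:
    recon = f['recon'][...]
    min_diff = f['min_diff'][...]
    mid_energy = f['mid_energy'][...]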
Code Example #11
def init():
    from utils import make_data
    make_data(db)
Code Example #12
File: __main__.py Project: PuZheng/flask-report
def init():
    from utils import make_data
    make_data(db)
Code Example #13
def test():
    BATCH_SIZE = 40
    N_CLASSES = 12
    IMAGE_SIZE = 208
    C_DIMS = 3
    data_dir = '/gpfs/home/stu16/shaw/Tell_Cat/data/cat_12_test'
    savepath = os.path.join(os.getcwd(), 'checkpoint/test.h5')
    csv_file = 'result.csv'
    data=[]
    label=[]
    reset = False
    if reset:
        with open(csv_file,newline='',encoding='UTF-8') as cf:
            rows=csv.reader(cf)
            for r in rows:
                print(r)
                image_name = r[0]
                image = imageio.imread(os.path.join(data_dir,image_name))
                if len(image.shape) < 3:
                    image = np.expand_dims(image,2).repeat(3,axis=2)

                image = skimage.transform.resize(image[:,:,:3],(IMAGE_SIZE,IMAGE_SIZE))
                data.append(np.asarray(image,dtype=np.float16))
                label.append(int(r[1]))

        temp = np.array([data,label])
        temp = temp.transpose()
        np.random.shuffle(temp)
        data=list(temp[:,0])
        label=list(temp[:,1])
        print(type(data),type(data[0]),data[0].shape)
        print('MAKE test.h5 ...')
        ts=time.time()
        utils.make_data(savepath,data,label)
        te=time.time()
        print('MAKE SUCCESS!\nTime: %.2f' %(te-ts))

    # test_image = cv.imread('/gpfs/home/stu16/shaw/new/Test/5.jpg')
    # test_image = cv.resize(test_image,(IMAGE_SIZE,IMAGE_SIZE))
    # cv.imwrite('/gpfs/home/stu16/shaw/new/sample/5.jpg',test_image)
    # #test_image = tf.expand_dims(test_image,0)
    # print(type(test_image))
    # test_image = np.asarray(test_image,dtype=np.float32)
    # test_image = np.expand_dims(test_image,0)
    # print(type(test_image),test_image.shape)

    print('READ test.h5 ...')
    ts1=time.time()
    test_data, test_label = utils.read_data(savepath)
    te1=time.time()
    print('READ SUCCESS!\nTime: %.2f' %(te1-ts1))

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 208, 208, 3])

    model_package = '.'.join(['model','mynet'])
    model = importlib.import_module(model_package)
    pred = methodcaller('mynet',x, BATCH_SIZE,IMAGE_SIZE,C_DIMS,N_CLASSES)(model).model()
    logit = tf.nn.softmax(pred)

    
    
    # Path where we store the trained model
    logs_train_dir = 'checkpoint/mynet'
    checkpoint_dir = '/gpfs/home/stu16/shaw/Tell_Cat/code/checkpoint'
    # Define the saver
    saver = tf.train.Saver()  
    result=[]
    pre_label=[]
    true_label=[]
    M=np.zeros(shape=(12,12))
    with tf.Session() as sess:
        print("Loading the model from the specified path ...")
        # Restore the model into the session
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(ckpt.model_checkpoint_path)
        it = len(test_data) // BATCH_SIZE  # number of full test batches
        start_time = time.time()
        print(len(test_data),BATCH_SIZE,it)
        for idx in range(0, it):
            batch_images = test_data[idx*BATCH_SIZE : (idx+1)*BATCH_SIZE]
            batch_labels = test_label[idx*BATCH_SIZE : (idx+1)*BATCH_SIZE]
            #summary, _, err, acc = self.sess.run([self.merge, self.train_op, self.loss, self.acc], feed_dict={self.images: batch_images, self.labels: batch_labels})
            prediction = sess.run(logit, feed_dict={x: batch_images})
            #print(idx+1,prediction.shape,max_index,type(prediction),prediction[0,:],type(prediction[0,:]))
            maxvalue = [max(i) for i in prediction]
            maxindex = [np.argmax(i) for i in prediction]
            for i in range(0,BATCH_SIZE):
                P=maxindex[i]
                T=batch_labels[i]
                pre_label.append(P)
                true_label.append(T)
                M[T,P]+=1
        acc = sum(M.diagonal())/np.sum(M)
        print('ACC: %.2f' %(acc*100.0))
Code Example #14
    data = read_data(file='fanfic-corpus.txt',
                     fold=number,
                     window_size=window_size,
                     experiment=setting,
                     direction=case,
                     test=test)
    train = data[0]
    train_y = data[1]
    val = data[2]
    val_y = data[3]

    X = []
    y = []

    data_train = train
    make_data(data_train, train_y, X, y, indicators)
    split = len(X)

    data_val = val
    make_data(data_val, val_y, X, y, indicators)

    y_nontransform = y
    outputs = []
    for i in y_nontransform:
        for e in i:
            outputs.append(e)
    y = lb.fit_transform(y)
    t = Tokenizer(split=" ", lower=True, filters='@')
    t.fit_on_texts(X)
    encoded_docs = t.texts_to_sequences(X)
    vocab_size = len(t.word_index) + 1
Code Example #15

# ----------------------------------------------------------------------------------------------------------------------
# Generate synthetic data for the twin experiment
# ----------------------------------------------------------------------------------------------------------------------

print("Generating true state and observations...")

dt = 0.025 / 6.0    # Model integration time step (equivalent to 30min)
n_steps_in_day = 48
n_steps = 1000

x0 = np.ones(n) * 8.0 # Initial state
x0[20] = 8.004        # Perturb 20-th coordinate

xt, yobs = make_data(model_fn=model.__call__, x0=x0, dt=dt, H=H, Q=Q, R=R, nsteps=n_steps, nspin=0)


# ----------------------------------------------------------------------------------------------------------------------
# Ready to run...
# ----------------------------------------------------------------------------------------------------------------------

xall = np.zeros((len(enkfs)+1, n_steps, n))
rmse = np.zeros((len(enkfs)+1, n_steps))

# Called when a smoother solution is ready. Here we will just store the result and RMSE to be plotted later
def on_result(x, A, t, args):
    kfi, is_ensemble = args
    xall[kfi, t, :] = x
    rmse[kfi, t] = math.sqrt(np.mean((x - xt[:, t])**2))
Code Example #16
File: mlp.py Project: njkrichardson/fouriernets
    # network parameters
    layer_sizes = [n_bins, 64, 4]
    L2_reg = 1.0

    # training parameters
    param_scale = 0.1
    batch_size = 256
    num_epochs = 75
    step_size = 0.001

    # data size
    n_data = 1000  # number of data per class

    # generate data
    print("Generating data...")
    train_inputs, test_inputs, train_labels, test_labels = make_data(
        n_per_class=n_data, n_bins=n_bins, n_draws=n_draws, split=True)

    # initialize the net
    init_params = init_mlp_params(param_scale, layer_sizes)

    # batching parameters
    num_batches = int(np.ceil(len(train_inputs) / batch_size))

    def batch_indices(iter):
        idx = iter % num_batches
        return slice(idx * batch_size, (idx + 1) * batch_size)

    # define training objective: negative log marginal likelihood
    def objective(params, iter):
        idx = batch_indices(iter)
        return -mlp_log_posterior(params, train_inputs[idx], train_labels[idx],
Code Example #17
    def train(self, sess, config):
        savepath = os.path.join(os.getcwd(), 'checkpoint/train.h5')
        if config.reset:
            data_dir = os.path.join(config.data_dir, 'cat_12_train')
            data_list = os.path.join(config.data_dir, 'train_list.txt')
            data = []
            label = []
            cnt = 1
            with open(data_list, 'r') as f:
                for fn in f:
                    l = fn.split()
                    label.append(int(l[1]))
                    #image = cv2.imread(config.data_dir + l[0])
                    #image = cv2.resize(image,(self.image_size,self.image_size),interpolation = cv2.INTER_AREA)
                    image = imageio.imread(config.data_dir + l[0])
                    print(config.data_dir + l[0], image.shape)

                    if len(image.shape) < 3:  #gray_image
                        image = np.expand_dims(image, 2).repeat(3, axis=2)

                    image = skimage.transform.resize(
                        image[:, :, :3], (self.image_size, self.image_size))
                    data.append(np.asarray(image, dtype=np.float16))

            print(type(data), type(data[0]), data[0].shape)
            temp = np.array([data, label])
            temp = temp.transpose()
            np.random.shuffle(temp)
            data = list(temp[:, 0])
            label = list(temp[:, 1])
            print(type(data), type(data[0]), data[0].shape)
            print('MAKE train.h5 ...')
            ts = time.time()
            utils.make_data(savepath, data, label)
            te = time.time()
            print('MAKE SUCCESS!\nTime: %.2f' % (te - ts))

        print('READ train.h5 ...')
        ts1 = time.time()
        train_data, train_label = utils.read_data(savepath)
        te1 = time.time()
        print('READ SUCCESS!\nTime: %.2f' % (te1 - ts1))

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)
        tf.global_variables_initializer().run()

        # coord = tf.train.Coordinator()
        # threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        #if self.load(self.checkpoint_dir):
        #  print(" [*] Load SUCCESS")
        #else:
        #  print(" [!] Load failed...")
        #if config.restore:
        #pre_model = tf.train.import_meta_graph('my-model-1000.meta')
        #loading restore model
        if config.restore:
            self.load(config.checkpoint_dir, config.restore_model)

        counter = 0
        print("Training ...")
        summary_dir = os.path.join('logs',
                                   datetime.now().strftime("%b%d_%H:%M:%S"))
        writer = tf.summary.FileWriter(summary_dir, sess.graph)
        for ep in range(config.epoch):
            it = len(train_data) // config.batch_size  #2160/48=45
            print("Epoch: [%5d/%d]    Learning_rate: %.e" %
                  (ep + 1, config.epoch, config.learning_rate))
            start_time = time.time()
            for idx in range(0, it):
                batch_images = train_data[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                batch_labels = train_label[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]
                summary, _, err, acc = self.sess.run(
                    [self.merge, self.train_op, self.loss, self.acc],
                    feed_dict={
                        self.images: batch_images,
                        self.labels: batch_labels
                    })
                counter += 1
                if (idx + 1) % 9 == 0:
                    print("[%3d/%d]  Loss: [%2.4f]  Acc: [%3.2f]  Time: [%.2f]  " \
                      %(idx+1, it, err, acc*100, time.time()-start_time))
                if (idx + 1) % it == 0:
                    self.save(config.checkpoint_dir, ep + 1)
            writer.add_summary(summary, ep + 1)
Code Example #18
# Load the model in fairseq
from fairseq.models.roberta import RobertaModel
import glob2
from tqdm import tqdm
import utils

dns_home = '/storage/hieuld/NLP'
phoBERT = RobertaModel.from_pretrained('PhoBERT_base_fairseq', checkpoint_file='model.pt')
phoBERT.eval()  # disable dropout (or leave in train mode to finetune)

train_path = 'Data/Train_Full/*/*.txt'
test_path = 'Data/Test_Full/*/*.txt'


text_train, label_train = utils.make_data(train_path)
text_test, label_test = utils.make_data(test_path)

# Save the files

utils._save_pkl('Data/text_train.pkl', text_train)
utils._save_pkl('Data/label_train.pkl', label_train)
utils._save_pkl('Data/text_test.pkl', text_test)
utils._save_pkl('Data/label_test.pkl', label_test)
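As a possible next step, not shown in the original script, the loaded model can embed one of the texts via the standard fairseq RobertaModel hub API; truncating inputs to the model's maximum length is assumed to be handled elsewhere.

# Hedged sketch: embedding one training text with the loaded model.
import torch

with torch.no_grad():
    tokens = phoBERT.encode(text_train[0])       # BPE-encode to token ids
    features = phoBERT.extract_features(tokens)  # shape: (1, seq_len, hidden_dim)
    sentence_vec = features.mean(dim=1)          # simple mean-pooled sentence vector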



Code Example #19
def client(serverIP, serverPort, clientID):
    # client init
    # print("Init client")
    global client_table, exit_flag
    exit_flag = 0
    client_table = {}  # client_table format: { clientID : client_address }

    # Dictionary mapping commands to their handler functions
    cmd2mode = {'@show_list':print_list, \
                 '@chat':send_msg, \
                 '@exit':send_exit \
                }

    # Create the socket
    try:
        #    print("Make socket...")
        server_address = (serverIP, serverPort)
        client_socket = ctrl_socket.ctrl_socket(('', clientPort), 'client')
    #    print("Make socket completed")
    except Exception:
        print("Make socket failed")
        exit(0)

    # Start a thread that receives data
    th_recv_data = threading.Thread(target=recv_data, args=(client_socket, ))
    th_recv_data.start()
    time.sleep(0.1)
    # Send the CID to the server
    try:
        #    print("Send CID to server...")
        data = utils.make_data(0, [clientID, server_address])
        client_socket.send_data(server_address, data)
    #    print("Send CID to server completed")
    except Exception:
        print("Send CID to server failed")
        exit(0)

    # Start the keep-alive sending thread
    th_send_alive = threading.Thread(
        target=send_alive,
        args=(client_socket, server_address),
    )
    th_send_alive.start()

    print("Init client completed")
    print("Start Shell...")
    while True:
        cmd = input(">> ")
        sys.stdout.flush()
        # Parse the entered command
        mode, address, msg = splitcmd(cmd, server_address)

        # Skip unknown commands
        if mode not in cmd2mode:
            continue

        # Call the handler that matches the command
        cmd2mode[mode](client_socket, address, clientID, msg)

        if mode == '@exit':
            # client was terminated
            exit_flag = 1
            del client_socket
            th_recv_data.join()
            th_send_alive.join()
            break
    print(clientID, "terminates")
    sys.exit()
Code Example #20
def send_exit(socket, address, cid, msg):
    data = utils.make_data(4, cid)
    socket.send_data(address, data)
Code Example #21
import os

# To decide if a sample will be in training or testing
import random

# Imports needed by the rest of this snippet; the alias `u` for the project's
# utils module is an assumption.
import utils as u
from sklearn import svm
from sklearn.model_selection import train_test_split

############### IMPORTANT - Specify the path to the input music file ####################
file_input = '/Users/aydinabiar/Desktop/MALIS Project/mozart_samples/mid/lacrimosa_original.mid'



# Read and store the chords of the file
chords, initial_stream = u.read_midi(file_input)

# Create dataset 
X, y = u.make_data(chords)

# Create training and testing data. use parameter random_state = int to fix one data for multiple calls
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

# Fitting a multi classification model using SVM with different kernels :
#  Linear, Radial Basis, Polynomial and Sigmoid
linear = svm.SVC(kernel='linear', C=1, decision_function_shape='ovo').fit(X_train, y_train)
rbf = svm.SVC(kernel='rbf', gamma=1, C=1, decision_function_shape='ovo').fit(X_train, y_train)
poly = svm.SVC(kernel='poly', degree=3, C=1, decision_function_shape='ovo').fit(X_train, y_train)
sig = svm.SVC(kernel='sigmoid', C=1, decision_function_shape='ovo').fit(X_train, y_train)

linear_pred = linear.predict(X_test)
poly_pred = poly.predict(X_test)
rbf_pred = rbf.predict(X_test)
sig_pred = sig.predict(X_test)
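A possible follow-up, not part of the original snippet: compare the four kernels with scikit-learn's accuracy_score.

from sklearn.metrics import accuracy_score

# Print test accuracy for each fitted kernel
for name, pred in [('linear', linear_pred), ('poly', poly_pred),
                   ('rbf', rbf_pred), ('sigmoid', sig_pred)]:
    print(name, accuracy_score(y_test, pred))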