Example #1
    def fit(self, epochs, batch_size=32, verbose=1, workers=10):
        self.createModel()
        with open('layoutannotations.json') as f:
            annotations = json.load(f)
        _, split = load()
        with open('lstminput.json') as f:
            lstminput = json.load(f)
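        # Build train/validation generators over the dataset split; DataGeneratorEntity is presumably a keras.utils.Sequence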
        training_generator = DataGeneratorEntity(annotations=annotations,
                                                 video_files=split['train'],
                                                 F=self.F,
                                                 batch_size=batch_size,
                                                 LSTM=self.LSTM,
                                                 lstminput=lstminput)
        validation_generator = DataGeneratorEntity(annotations=annotations,
                                                   video_files=split['val'],
                                                   F=self.F,
                                                   batch_size=batch_size,
                                                   LSTM=self.LSTM,
                                                   lstminput=lstminput)
        self.model.fit_generator(generator=training_generator,
                                 validation_data=validation_generator,
                                 epochs=epochs,
                                 use_multiprocessing=True,
                                 workers=workers,
                                 verbose=verbose)

        return
Example #2
    def fit(self, epochs, batch_size=32):

        self.createModel()
        with open('layoutannotations.json') as f:
            annotations = json.load(f)
        _, split = load()
        with open('lstminput.json') as f:
            lstminput = json.load(f)
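
        # Generators for the train/val splits; DataGeneratorLayout is presumably a keras.utils.Sequence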
        training_generator = DataGeneratorLayout(annotations=annotations,
                                                 video_files=split['train'],
                                                 F=self.F,
                                                 batch_size=batch_size,
                                                 LSTM=self.LSTM,
                                                 lstminput=lstminput,
                                                 graph=self.graph)
        validation_generator = DataGeneratorLayout(annotations=annotations,
                                                   video_files=split['val'],
                                                   F=self.F,
                                                   batch_size=batch_size,
                                                   LSTM=self.LSTM,
                                                   lstminput=lstminput,
                                                   graph=self.graph)
        self.model.fit_generator(generator=training_generator,
                                 validation_data=validation_generator,
                                 epochs=epochs,
                                 use_multiprocessing=True,
                                 workers=10)
Example #3
import loaddata


def main():

    filename = 'data/inflammation-01.csv'

    data = loaddata.load(filename)

    print(filename)

    print(data.mean(axis=1))
Example #4
import json

from loaddata import load


def f**k():
	annotations, split = load()
	layoutanno = dict()
	i = 1
	for annotation in annotations:
		layoutanno.update({annotation['globalID']:annotation})
		print("[{}] ".format(i)+annotation['globalID'])
		i = i + 1
	with open('layoutannotations.json','w') as fp:
		json.dump(layoutanno,fp)
	return
Example #5
	def fit(self, epochs, batch_size=32, verbose=1, workers=30):

		self.createModel()
		#self.model.load_weights('LayoutCheckpoints/weights-improvement-15-15603.58.hdf5')
		#opt = Adam(lr=0.001, decay=0.5, amsgrad=False)  #weight decay 0.0001
		#self.model.compile(optimizer=opt, loss={'dense_4':self.loss2, 'activation_1':self.loss1}, loss_weights=[1, 1], metrics=[])
		with open('layoutannotations.json') as f:
			annotations = json.load(f)
		_, split = load()
		with open('lstminput.json') as f:
			lstminput = json.load(f)
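		# Checkpoint callback: keep only the weights with the lowest validation loss seen so far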
		
		filepath="LayoutCheckpoints/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
		checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')

		training_generator = DataGeneratorLayout(
			annotations=annotations, video_files=split['train'], F=self.F,
			batch_size=batch_size, LSTM=self.LSTM, lstminput=lstminput,
			graph=self.graph)
		validation_generator = DataGeneratorLayout(
			annotations=annotations, video_files=split['val'], F=self.F,
			batch_size=batch_size, LSTM=self.LSTM, lstminput=lstminput,
			graph=self.graph)
		self.history = self.model.fit_generator(
			generator=training_generator, validation_data=validation_generator,
			epochs=epochs, use_multiprocessing=True, workers=workers,
			verbose=verbose, callbacks=[checkpoint])
		self.model.save('LayoutComposerModel.h5')			
Example #6
# -*- coding: utf-8 -*-

import numpy as np

import loaddata as ld

import gradientdescent as GD

import normalization as norm

X, Y, n = ld.load('data.txt')

X = norm.normalize(X, n)

X = X.reshape((n * 2))

tmp = []
for i in xrange(0, 2 * n, 2):  # tear my hands off
    tmp.append(1)
    tmp.append(X[i])
    tmp.append(X[i + 1])

X = np.array(tmp).reshape(n, 3)

print X

alpha = 0.01
iterations = 400

theta = np.zeros((3, 1))#init fitting params
Example #7
File: main.py Project: modu/scripts
import numpy as np

import loaddata as ld

import cost

X, Y, m = ld.load('data.txt')

theta = np.zeros((2 + 1, 1))

print cost.costfunc(theta, X, Y, m)
Example #8
import numpy as np

import loaddata as ld

import computeCost as cC

import gradientdescent as GD

import math


X, Y, m = ld.load('ex1data.txt')  # load X's and Y's; m is the length of the dataset

theta = np.zeros((2, 1))#init fitting params

#should be 32.07
print 'Cost:', cC.compCost(X, Y, theta)

#Some gradient descent settings
iterations = 1500
alpha = 0.01


theta,J_history = GD.GDescent(X, Y, theta, alpha, iterations)
print 'theta: ',  theta


pvalue = 3.5
#predict = 1/(1+ math.exp( np.dot( np.array([1, pvalue]).reshape(1, 2), theta)))
predict = np.dot( np.array([1, pvalue]).reshape(1, 2), theta )
Example #9
File: main.py Project: modu/scripts
import numpy as np

import loaddata as ld

import computeCost as cC

import gradientdescent as GD

import math

X, Y, m = ld.load('ex1data.txt')  # load X's and Y's; m is the length of the dataset

theta = np.zeros((2, 1))  #init fitting params

#should be 32.07
print 'Cost:', cC.compCost(X, Y, theta)

#Some gradient descent settings
iterations = 1500
alpha = 0.01

theta, J_history = GD.GDescent(X, Y, theta, alpha, iterations)
print 'theta: ', theta

pvalue = 3.5
#predict = 1/(1+ math.exp( np.dot( np.array([1, pvalue]).reshape(1, 2), theta)))
predict = np.dot(np.array([1, pvalue]).reshape(1, 2), theta)
print 'predict for ', pvalue, predict
Example #10
from scipy.io import savemat
import loaddata
import scipy
import numpy as np
import torch
import os

torch.manual_seed(0)

dataset_name = 'ppi'
for noise_level in [0, 1.0, 2.0, 3.0]:
    a1, f1, a2, f2, ground_truth = loaddata.load(dataset_name,
                                                 noise_level=noise_level)
    print(f1, f2)
    feature_size = f1.size(1)
    ns = [f1.size(0), f2.size(0)]
    # edge_list_1 = get_edge_list(a1, f1.size(0))
    # edge_list_2 = get_edge_list(a2, f2.size(0))
    # features = [f1, f2]
    # edges = [a1, a2]
    f = open('ppi_combined_edges.txt', 'w')
    print(ns)
    print(a1.size())
    print(ground_truth)
    n = ns[0]
    print(n)
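    # Keep each undirected edge once: entries where the source index is smaller than the target index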
    t1 = (a1[0] < a1[1]).nonzero()
    t2 = (a2[0] < a2[1]).nonzero()
    a1 = a1[:, t1]
    a2 = a2[:, t2]
    g = ground_truth
Example #11
import numpy as np
import json
from lstm import lstm
from loaddata import load
from keras.models import *
import cv2

annotations, _ = load()
F = 75
'''n_words=6414
n_tags=42
LSTM = lstm(hidden=64)
LSTM.load_weights('entity_lstm.h5')'''

from preprocess import preProcessData
from keras.models import Model, Input
from keras.layers import *
from keras_contrib.layers import CRF
from keras.utils import plot_model
from keras.models import load_model

from keras.callbacks import ModelCheckpoint
#from livelossplot import PlotLossesKeras

BATCH_SIZE = 512  # Number of examples used in each iteration
EPOCHS = 20  # Number of passes through entire dataset
MAX_LEN = 75  # Max length of review (in words)
EMBEDDING = 100  # Dimension of word embedding vector
Example #12
# -*- coding: utf-8 -*-

import numpy as np

import loaddata as ld

import gradientdescent as GD

import normalization as norm

X, Y, n = ld.load('data.txt')

X = norm.normalize(X, n)

X = X.reshape((n * 2))

tmp = []
for i in xrange(0, 2 * n, 2):  # tear my hands off
    tmp.append(1)
    tmp.append(X[i])
    tmp.append(X[i + 1])

X = np.array(tmp).reshape(n, 3)

print X

alpha = 0.01
iterations = 400

theta = np.zeros((3, 1))  #init fitting params
Example #13
import sys

import loaddata


def main():
    filename = sys.argv[1]  # a new change!!
    data = loaddata.load(filename)
    print filename
    print data.mean(axis=1)
Example #14
            args.__dict__[t] = arg_dict[t]
    except Exception:
        print('Error loading config; using default settings instead')

print(args)
if args.setup == 1:
    args.net = GCNNet
elif args.setup == 2:
    args.net = GATNet
elif args.setup == 3 or args.setup == 4:
    args.net = LGCN

dataset_name = args.dataset
noise_level = args.noise
if dataset_name in ['douban']:
    a1, f1, a2, f2, ground_truth, prior = load(dataset_name,
                                               noise_level=noise_level)
    feature_size = f1.shape[1]
    ns = [a1.shape[0], a2.shape[0]]
    edge_1 = torch.LongTensor(np.array(a1.nonzero()))
    edge_2 = torch.LongTensor(np.array(a2.nonzero()))
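    # Edge indices in COO format (2 x num_edges) for the two graphs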
    ground_truth = torch.tensor(np.array(
        ground_truth, dtype=int)) - 1  # original indices start from 1
    features = [
        torch.FloatTensor(f1.todense()),
        torch.FloatTensor(f2.todense())
    ]
    edges = [edge_1, edge_2]
    prior = torch.FloatTensor(prior)
    prior_rate = args.prior_rate
elif dataset_name in ['ppi', 'arena']:
    a1, f1, a2, f2, ground_truth = load(dataset_name, noise_level=noise_level)
Example #15
if args.pospath:
    modelname += '_pos'
    pre_textname += '_pos'
    pre_embedname += '_pos'
if args.embed_size != 100:
    modelname += f'_v{args.embed_size}'
    pre_textname += f'_v{args.embed_size}'
    pre_embedname += f'_v{args.embed_size}'

################################
# train or evaluation
################################
if args.train:
    print('[{0:.15s}] Train'.format('STATE'))
    # 1. load data
    data, _, infos = load(reviewpath, productpath, infopath)

    # 2. preprocessing train set
    text = GP.fit(data.content.tolist(),
                  wordfix_path=args.wordpath,
                  posfix_path=args.pospath)

    # save preprocessed text
    with open(f'{args.savedir}/{pre_textname}.pickle', 'wb') as f:
        pickle.dump(text, f)

    # 2.1 product description
    description = infos.description.str.replace('\n', ' ').tolist()
    description = list(map(GP.stopword, description))
    description = list(map(GP.kkma.nouns, description))
Example #16
import sys

import loaddata


def main():
    filename = sys.argv[1]
    data = loaddata.load(filename)
    print(filename)
    print(data.mean(axis=1))