Example #1
    'dydPath': dydPath,
}
del savPath, dydPath

# these files will change after the refactor; they must run after the locations are defined

if ver:
    exec(open("./initPSLF3.py").read())
    exec(open("./makeGlobals3.py").read())
else:
    execfile('initPSLF.py')
    execfile('makeGlobals.py')

PSLF.RunEpcl("dispar[0].noprint = 1")  # turn off terminal solution details
# mirror arguments: locations, simParams, debug flag
mir = Model(locations, simParams, 1)

# Perturbations configured for the test case (eele)
# step up and down (pgov test)
mir.addPert('Load', [3], 'Step', ['P', 2, 101])  # step load P up 1 MW at t = 2 s
mir.addPert('Load', [3], 'Step', ['P', 30, 100])  # step back down to 100 MW at t = 30 s

# GE 4 machine test
#mir.addPert('Load',[5],'Step',['P',2,4,'rel']) # step 4 MW up
#mir.addPert('Load',[5],'Step',['P',52,-4,'rel']) # step back to normal
#mir.addPert('Load',[5],'Step',['St',2,0]) # turn load off
#mir.addPert('Load',[5],'Step',['St',3,1]) # turn load on
#mir.addPert('Load',[6],'Step',['P',15,4,'rel']) # step 4 MW up
#mir.addPert('Load',[6],'Step',['P',25,4,'rel']) # step 4 MW up
#mir.addPert('Load',[6],'Step',['P',55,-8,'rel']) # step back to normal
Example #2
from http.server import BaseHTTPRequestHandler, HTTPServer
from Model import Model
from View import View

model = Model()
view = View()

initialized = False


class RequestHandler(BaseHTTPRequestHandler):

    # GET
    def do_GET(self):

        global model
        global view
        global initialized

        if initialized:
            model.update()

        else:
            initialized = True

        data = view.generateImage(model.game.gameBoard)

        # Send response status code
        self.send_response(200)

        # Send headers
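        # (Hedged completion, not part of the original snippet, which is cut
        # off here) assuming view.generateImage() returns PNG-encoded bytes:
        self.send_header('Content-type', 'image/png')
        self.end_headers()
        self.wfile.write(data)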
Example #3
    def draw(self,
             text,
             filename,
             active_cu,
             active_pe=None,
             active_router=None,
             active_router_edge=None,
             active_layers=[],
             active_windows=None):
        #if np.count_nonzero(active_cu) == 0 and np.count_nonzero(active_router) == 0:
        #    return
        assert len(active_cu) == len(self.weight_mapping)

        # Draw cu mapping
        self.ensure_cu_color_default()

        cu_color = self.cu_color_default.copy()
        for k, v in enumerate(active_cu):
            if v != 0:
                t = list(cu_color[k])
                t[3] = 0.8
                cu_color[k] = tuple(t)

        nx.draw(self.cu_G,
                self.cu_pos,
                node_color=cu_color,
                node_shape='s',
                node_size=self.node_size)

        # Draw PE
        current_axis = plt.gca()
        padding = 0.1
        for i in range(0, self.pe_width):
            for j in range(0, self.pe_height):
                x = (i * (self.CU_num_x + 1)) - 0.5 - padding
                y = 1 + (j * (self.CU_num_y + 1)) - 0.5 - padding
                current_axis.add_patch(
                    Rectangle((x, y),
                              width=self.CU_num_x + padding * 2,
                              height=self.CU_num_y + padding * 2,
                              fill=False))

        # Draw Router
        if active_router is None:
            active_router = [0] * self.router_size
        router_color = [(0.2, 0, 0)] * self.router_size
        for k, v in enumerate(active_router):
            if v != 0:
                t = list(router_color[k])
                t[0] = 1
                router_color[k] = tuple(t)

        nx.draw(self.router_G,
                self.router_pos,
                node_color=router_color,
                node_shape='o',
                node_size=self.node_size)

        # Draw text
        nx.draw(self.empty_G,
                pos=self.empty_pos,
                node_color='none',
                node_shape='s',
                node_size=self.node_size)
        self.fig.text(.5, 0.03, text, ha='center', fontsize=15)

        # Draw model graph
        plt.subplot2grid((1, 4), (0, 3))

        from Model import Model
        model = Model(self.model_config)

        show_layers = []
        layer_height = []
        layer_height_offset = []
        layer_width = []
        model_height = 0
        model_width = 0
        height_padding = 3
        for i in range(0, len(self.model_config.layer_list)):
            if i not in CARE_LAYERS:
                continue
            layer_type = self.model_config.layer_list[i].layer_type
            if layer_type in ('convolution', 'fully'):
                show_layers.append(i)
                layer_height.append(model.input_h[i])
                layer_width.append(model.input_w[i])
                model_height += model.input_h[i]
                model_width = max(model_width, model.input_w[i])
        total_width = model_width * 2 + 3 + 1
        total_height = model_height + height_padding * (len(show_layers) + 1)

        nlayers = len(show_layers)
        model_color = [0] * nlayers
        v = -1
        for i in range(0, nlayers):
            model_color[i] = self.get_layer_color(
                self.nlayer_mapping[show_layers[i]] + 1)
            if show_layers[i] in active_layers:
                t = list(model_color[i])
                t[3] = 0.8
                model_color[i] = tuple(t)
            # alpha to color:
            t = list(model_color[i])
            t[0] = 1 - (1 - t[0]) * t[3]
            t[1] = 1 - (1 - t[1]) * t[3]
            t[2] = 1 - (1 - t[2]) * t[3]
            t[3] = 1
            model_color[i] = tuple(t)

        model_G = nx.grid_2d_graph(1, nlayers)

        p = []
        labels = {}
        height = total_height - 1
        for i in range(0, nlayers):
            layer_height_offset.append(height - height_padding)
            p.append([
                1 + (model_width // 2),
                height - height_padding - (layer_height[i] // 2)
            ])
            labels[(
                0, i
            )] = f'{self.model_config.layer_list[show_layers[i]].layer_type}\nInput Tensor:'
            height -= layer_height[i] + height_padding

        nodes = list(model_G.nodes)
        model_pos = {}
        for i in range(0, len(nodes)):
            model_pos[nodes[i]] = p[i]

        nx.draw(model_G,
                model_pos,
                node_color=model_color,
                node_shape='o',
                node_size=int((self.W * 10 * model_width / total_width)**2))
        nx.draw_networkx_labels(model_G, model_pos, labels)

        current_axis = plt.gca()
        padding = 0.1
        for k in range(0, nlayers):
            model_G = nx.grid_2d_graph(layer_width[k], layer_height[k])
            model_G.remove_edges_from(model_G.edges)

            p = []
            for i in range(0, layer_width[k]):
                for j in range(0, layer_height[k]):
                    p.append(
                        [1 + model_width + 1 + i, layer_height_offset[k] - j])

            nodes = list(model_G.nodes)
            model_pos = {}
            for i in range(0, len(nodes)):
                model_pos[nodes[i]] = p[i]
            nx.draw(model_G,
                    model_pos,
                    node_shape='s',
                    node_color='gray',
                    node_size=int((self.W * 10 / total_width)**2))

            x = 1 + model_width + 1 - 0.5 - padding
            y = layer_height_offset[k] - layer_height[k] + 0.5 - padding
            current_axis.add_patch(
                Rectangle((x, y),
                          width=layer_width[k] + padding * 2,
                          height=layer_height[k] + padding * 2,
                          fill=False))

            if active_windows is not None:
                for window_id in active_windows:
                    if show_layers[k] == window_id[0]:
                        x = 1 + model_width + 1 - 0.5 + window_id[2]
                        y = layer_height_offset[k] - window_id[3] + 0.5
                        current_axis.add_patch(
                            Rectangle((x, y),
                                      width=window_id[4] - window_id[2],
                                      height=window_id[3] - window_id[1],
                                      fill=False,
                                      color='red'))

        # Draw bottom-left and up-right invisible node (for border)
        nx.draw(self.empty_G,
                pos=self.empty_pos,
                node_color='none',
                node_shape='o',
                node_size=int((self.H * 15 / nlayers)**2))
        nx.draw(self.empty_G,
                pos={self.empty_node: [total_width, total_height]},
                node_color='none',
                node_shape='o',
                node_size=int((self.H * 15 / nlayers)**2))

        plt.tight_layout()

        plt.savefig(f"{filename}.png", format="PNG")

        self.init_fig()
Example #4
    # attributes of the agent
    num_states = 80
    num_actions = 4
    max_steps = 5400  # seconds = 1 h 30 min each episode
    green_duration = 10
    yellow_duration = 4

    # setting the cmd mode or the visual mode
    if not gui:
        sumoBinary = checkBinary('sumo')
    else:
        sumoBinary = checkBinary('sumo-gui')

    # initializations
    model = Model(num_states, num_actions, batch_size)
    memory = Memory(memory_size)
    traffic_gen = TrafficGenerator(max_steps)
    sumoCmd = [
        sumoBinary, "-c", "intersection/tlcs_config_train.sumocfg",
        "--no-step-log", "true", "--waiting-time-memory",
        str(max_steps)
    ]
    saver = tf.train.Saver()

    with tf.Session() as sess:
        print("PATH:", path)
        print("----- Start time:", datetime.datetime.now())
        sess.run(model.var_init)
        sim_runner = SimRunner(sess, model, memory, traffic_gen,
                               total_episodes, gamma, max_steps,
Example #5
MEMORY_WARMUP_SIZE = 1000  # fill the replay memory with some experience before training starts
BATCH_SIZE = 64  # number of samples given to the agent per learn() call, drawn at random from the replay memory
LEARNING_RATE = 0.001  # learning rate
GAMMA = 0.9  # reward discount factor, usually between 0.9 and 0.999

gpu = fluid.CUDAPlace(0)
fluid.Executor(gpu)

env = PaddleEnv()
action_dim = 3  # three possible actions
obs_shape = [5]  # five observation dimensions

rpm = ReplayMemory(MEMORY_SIZE)  # DQN experience replay buffer

# build the agent with the PARL framework
model = Model(act_dim=action_dim)
algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=LEARNING_RATE)
agent = Agent(
    algorithm,
    obs_dim=obs_shape[0],
    act_dim=action_dim,
    e_greed=0.05,  # probability of picking a random action (exploration)
    e_greed_decrement=10e-7)  # exploration slowly decreases as training converges

# load the saved model
save_path = './Model/dqn_model.ckpt'
agent.restore(save_path)

while True:  # run max_episode training episodes; test runs are not counted toward the episode count
    obs = env.reset()
    episode_reward = 0
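    # (Hedged sketch, not part of the original snippet, which is cut off here)
    # a typical evaluation loop, assuming env.step() returns
    # (obs, reward, done, info) and agent.predict() returns the greedy action:
    while True:
        action = agent.predict(obs)
        obs, reward, done, _ = env.step(action)
        episode_reward += reward
        if done:
            break
    print('episode_reward:', episode_reward)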
Example #6
import numpy
import tqdm
import matplotlib.pyplot as plt

import inputters
import plotting
import stateUpdaters
import StateEstimator
from Model import Model

backdate = 0
inputs = inputters.FakeInputs("data/run_9_glucose.csv")
su = stateUpdaters.FakeStateUpdate("data/run_9_conc.csv", backdate=backdate)

ts = numpy.linspace(0, 200, 200)

# Biomass C H_1.8 O_0.5 N_0.2 => 24.6 g/mol
#     Ng, Nx, Nfa, Ne, Nco, No, Nn, Na, Nb, Nz, Ny, V, Vg
X0 = [0, 4.6 / 24.6, 0, 0, 0, 0, 0, 1e-5, 0, 5.1, 1.2, 1.077, 0.1, 25]

m = Model(X0, inputs, pH_calculations=True)
Xs = [X0]

# State estimation
t_predict = 1
se = StateEstimator.StateEstimator(X0, inputs, t_predict)

live_plot = False

if live_plot:
    plt.figure(figsize=(20, 20))
    plt.ion()

for ti in tqdm.tqdm(ts[1:]):
    m.step(ts[1])
    se.step(ts[1])
Example #7
    return inputs, targets

if __name__ == "__main__":

    resultsDF = {}
    eps = sys.argv[1]
    num_samples = int(sys.argv[2])

    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        _ = sess.run([init_op])

        print("EPSILON = %s" % eps)
        resultsDict = {}
        modelPath = "trained_models/ndppca_eps_%s_600_amortized" % eps
        data, model = MNIST(), Model(modelPath) 
        inputs, targets = generate_data(data, samples=num_samples, targeted=True,
                                        start=0, inception=False)

        plc = tf.placeholder_with_default(tf.zeros((1, 28, 28, 1), dtype=tf.float32), shape=(None, 28, 28, 1))
        mnist_output = model.predict(plc)

        def run_model(the_inputs):
            the_inputs = the_inputs.reshape((-1, 28, 28, 1))
            output = sess.run([mnist_output], feed_dict={plc: the_inputs})
            return output

        attack = CarliniL2(sess, model, max_iterations=1000, confidence=0)
        adv = attack.attack(inputs, targets, model)

        # None adversarial examples for each sample of MNIST. 
Example #8
# main_program
for i in range(len(sys.argv)):
    if (sys.argv[i] == "-modelName"):
        model_name = sys.argv[i + 1]
    elif (sys.argv[i] == "-data"):
        data_loc = sys.argv[i + 1]
    elif (sys.argv[i] == "-target"):
        target_loc = sys.argv[i + 1]

if not os.path.exists(model_name):
    os.makedirs(model_name)

batch_size = 12
criterion = Criterion()
dataset = Dataset(batch_size)
model = Model(2, 128, 153, 153, True)

dataset.read_data(data_loc, 'X_train')
dataset.read_data(target_loc, 'Y_train')
train_data_len = len(dataset.X_train)

model.add_layer(RNN(153, 128, 20))
model.add_layer(Linear(128, 2))

train(8, 1)
train(3, 1e-1)
accuracy(0, train_data_len)
train(6, 1e-2)
accuracy(0, train_data_len)
train(3, 1e-3)
accuracy(0, train_data_len)
Example #9
def run_with_save():
    # follow the requirements of the professor's assignment:
    # Your simulation runs as follows:
    # Plot the initial grid as a heat-map.
    # Plot the initial distribution of the happiness in your city.
    # Iterate over 4) 20 times
    # Calculate happiness for each agent. Relocate an unhappy agent to a random empty cell.
    # Plot the final grid as a heat-map.
    # Plot the final distribution of the happiness in your city.

    model.plot_grid(True, "initial.png")  # plot initial
    model.plot_happiness(
        "happiness.png"
    )  # Plot the initial distribution of the happiness in your city.
    for i in range(step):  # Iterate over 4) 20 times
        if not model.is_happy():
            model.step()
    model.plot_grid(True, "after20.png")  # Plot the final grid as a heat-map.
    model.plot_happiness(
        "after20h.png"
    )  # Plot the final distribution of the happiness in your city.


'''MAIN'''
model = Model(size, size, 1 - empty_ratio, similarity)

#run_without_pause()
#run()
run_with_save()
Example #10
# -*- coding: utf-8 -*-
"""
Created on Sat Mar  7 16:05:15 2020

@author: Chris
"""

from Model import Model
from Network import G
import pandas as pd

#May be replaced by identical step in Model
model = Model(len(G.nodes()))
for i in range(3650):
    model.step()

traits = model.datacollector.get_agent_vars_dataframe()
traits.to_csv("traits.csv")
Example #11
from Controller import Controller
from Model import Model

from configuration.configParser import *

if __name__ == "__main__":
    ''' The result of config parser. '''
    result_dict = config_parser_result()
    ''' Model creation. '''
    model = Model('storage', result_dict[save_type_key])
    ''' Controller creation. Use model as argument. '''
    controller = Controller(model)
    ''' The start of console application. '''
    controller.start()
    ''' The save of the model. '''
    model.save()
Example #12
env = Environment(portnumber, scene, file, order, action_values)

num_states = env._num_states
num_actions = env._num_actions

BATCHSIZE = 128

MAX_EPS = 1
MIN_EPS = 0.001
DECAY = 0.0004
GAMMA = 0.8

NUM_EPISODE = 10000

model = Model(num_states, num_actions, BATCHSIZE)
mem = Memory(700000)
vis = VisuJSON(env._refZMP._X, env._refZMP._Y)

trajectory = {}

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(model._var_init)
    sr = SimRunner(sess, model, env, mem, MAX_EPS, MIN_EPS, DECAY, GAMMA)
    cnt = 0
    while cnt < NUM_EPISODE + 1:
        start = time.time()

        # if cnt % 10 == 0:
        #     print('------------------------------------------------------------------------------')
Example #13
    def produce_simulation(self):

        if not (path.exists(self.errorCSV)):
            print(
                "No simulation for these parameters exists in Uncertainty data. Proceeding with simulation"
            )

            # Initialize the models that are saved using the parameters declared above
            predictor = Model(self.nstep)
            predictor.load_FOPTD()

            deviations = np.arange(0, self.maxError)

            stdev = np.array([0])
            error = np.array([0])
            kp_pred = np.array([0])
            theta_pred = np.array([0])
            tau_pred = np.array([0])

            kp_true = np.array([0])
            theta_true = np.array([0])
            tau_true = np.array([0])

            for deviation in deviations:
                numTrials = self.numTrials
                nstep = self.nstep
                timelength = self.timelength
                trainFrac = self.trainFrac
                # then simulates using the initialized model
                sig = Signal(numTrials, nstep, timelength, trainFrac)
                sig.training_simulation(KpRange=[1, 10],
                                        tauRange=[1, 10],
                                        thetaRange=[1, 10])

                # In this case, since we are only loading the model, not trying to train it,
                # we can use function simulate and preprocess
                xData, yData = sig.simulate_and_preprocess(stdev=deviation)

                # Function to make predictions based off the simulation
                predictor.predict(sig, savePredict=False, plotPredict=False)

                error = np.concatenate((predictor.errors, error))
                kp_pred = np.concatenate((predictor.kpPredictions[:,
                                                                  0], kp_pred))
                theta_pred = np.concatenate(
                    (predictor.thetaPredictions[:, 0], theta_pred))
                tau_pred = np.concatenate(
                    (predictor.tauPredictions[:, 0], tau_pred))

                kp_true = np.concatenate((sig.kps, kp_true))
                theta_true = np.concatenate((sig.thetas, theta_true))
                tau_true = np.concatenate((sig.taus, tau_true))
                stdev = np.concatenate((np.full_like(predictor.errors,
                                                     deviation), stdev))

            sd = pd.DataFrame()
            sd['stdev'] = stdev
            sd['mse'] = error
            sd['kpPred'] = kp_pred
            sd['tauPred'] = tau_pred
            sd['thetaPred'] = theta_pred
            sd['kpTrue'] = kp_true
            sd['tauTrue'] = tau_true
            sd['thetaTrue'] = theta_true

            sd.to_csv(self.errorCSV, index=False)

        else:
            print(
                "Data exists for the parameters, proceeding to producing uncertainty estimate"
            )
            try:
                sd = pd.read_csv(self.errorCSV).drop(['Unnamed: 0'], axis=1)
                sd.drop(sd.tail(1).index, inplace=True)
            except KeyError:
                sd = pd.read_csv(self.errorCSV)
                sd.drop(sd.tail(1).index, inplace=True)

        self.errorDict = {}

        prefixes = ['kp', 'tau', 'theta']
        for prefix in prefixes:
            sd[prefix + 'Error'] = (sd[prefix + 'Pred'] - sd[prefix + 'True'])
            h = np.std(sd[prefix + 'Error'])
            self.errorDict[prefix] = h

            if self.plotUncertainty:
                plt.figure(dpi=200)
                plt.hist(sd[prefix + 'Error'], bins=100)
                plt.xlabel('Standard Error in ' + prefix)
                plt.ylabel("Frequency Distribution")

                plt.figure(dpi=200)
                plt.plot(sd[prefix + 'True'], sd[prefix + 'Pred'], '.')
                plt.plot(np.linspace(1, 10), np.linspace(1, 10), 'r--')
                plt.plot(np.linspace(1, 10), np.linspace(1, 10) + h, 'g--')
                plt.plot(np.linspace(1, 10), np.linspace(1, 10) - h, 'g--')

    def get_errors(self):
        return self.errorDict.values()
Example #14
# This is the file you run to get the test results
# In console, run: python run_test.py

from test_model import test_model

from Model import Model
from Model import GenderModel

# def model_1(a, b):
#     return a + b
#
# result = test_model(model_1)
#
# print "Result of model_1 is %d." % result
model1 = Model()
result = test_model(model1, 50)

model2 = GenderModel()
result = test_model(model2, 50)
Example #15
 def __init__(self):
     model = Model()
     model.start_thread()
     MainView(model).update()
Example #16
 def __init__(self):
     self.model = Model()
     self.view = View()
     self.velkommen_text = ''
Example #17
def main():
    device = torch.device("cuda:0")
    num_datapoints = 1984
    views = 10
    load_points = True
    prepareDir(vis_directory)
    # Need to replace sundermeyer-random with something where we can
    # use predetermined poses, to plot each arch to visualize
    datagen = DatasetGenerator("",
                            "./data/cad-files/ply-files/obj_10.ply",
                            375,
                            num_datapoints,
                            "not_used",
                            device,
                            "sundermeyer-random",
                            random_light=False)

    # load model
    encoder = Encoder("./data/obj1-18/encoder.npy").to(device)
    encoder.eval()
    checkpoint = torch.load("./output/paper-models/10views/obj10/models/model-epoch199.pt")

    model = Model(num_views=views).cuda()
    model.load_state_dict(checkpoint['model'])
    model = model.eval()
    pipeline = Pipeline(encoder, model, device)

    # around x
    shiftx = np.eye(3, dtype=float)
    theta = np.pi / num_datapoints
    #theta = np.pi / 3
    shiftx[1,1] = np.cos(theta)
    shiftx[1,2] = -np.sin(theta)
    shiftx[2,2] = np.cos(theta)
    shiftx[2,1] = np.sin(theta)
    # around y
    shifty = np.eye(3, dtype=float)
    theta = np.pi / num_datapoints
    shifty[0,0] = np.cos(theta)
    shifty[0,2] = -np.sin(theta)
    shifty[2,2] = np.cos(theta)
    shifty[2,0] = np.sin(theta)
    # around z
    shiftz = np.eye(3, dtype=float)
    theta = np.pi / num_datapoints
    shiftz[0,0] = np.cos(theta)
    shiftz[0,1] = -np.sin(theta)
    shiftz[1,1] = np.cos(theta)
    shiftz[1,0] = np.sin(theta)
    predicted_poses = []
    predicted_poses_raw = []
    R_conv = np.eye(3, dtype=float)
    #R_conv = np.array([[ 0.5435,  0.1365,  0.8283],
    #                   [ 0.6597,  0.5406, -0.5220],
    #                   [-0.5190,  0.8301,  0.2037]])
    #R_conv = np.array([[-0.7132,  0.0407,  0.6998],
    #                   [ 0.1696, -0.9586,  0.2287],
    #                   [ 0.6802,  0.2818,  0.6767]])
    #R_conv = np.array([[-0.9959,  0.0797,  0.0423],
    #                   [ 0.0444,  0.0249,  0.9987],
    #                   [ 0.0786,  0.9965, -0.0283]])

    if load_points:
        # Try with points from the sphere
        points = np.load('./output/depth/spherical_mapping_obj10_1_500/points.npy', allow_pickle=True)
        num_datapoints = len(points)

        Rin = []
        for point in points:
            Rin.append(pointToMat(point))
            #Rin.append(np.matmul(pointToMat(point), shiftx))

    else:
        Rin = []
        for i in range(num_datapoints):
            # get data from fixed R and T vectors
            R_conv = np.matmul(R_conv, shiftx)
            R = torch.from_numpy(R_conv)
            Rin.append(R)

    t = torch.tensor([0.0, 0.0, 375])
    # Generate images
    data = datagen.generate_image_batch(Rin = Rin, tin = t, augment = False)

    # run images through model
    # Predict poses
    output = pipeline.process(data["images"])

    # evaluate how output confidence and each view changes with input pose
    plot_confidences(output.detach().cpu().numpy())
    if load_points:
        plot_flat_landscape(points, output[:,0:views].detach().cpu().numpy())

    rotation_matrices = []
    for i in range(views):
        start = views + i*6
        end = views + (i + 1)*6
        curr_poses = output[:,start:end]
        matrices = compute_rotation_matrix_from_ortho6d(curr_poses)
        euler_angles = compute_euler_angles_from_rotation_matrices(matrices)
        print(matrices.shape)
        print(matrices[0:3])
        print(euler_angles.shape)
        print(euler_angles[0:3])
        exit()
Example #18
import logging
from flask import Flask, request, jsonify
from Model import Model

app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
model = app.model = Model()


@app.route('/getAllProducts')
def getAllProducts():
    '''Returns the entire list of products'''
    result = model.getProducts()

    return jsonify(result)


@app.route('/getProductById', methods=["POST"])
def getProductById():
    id = request.form["id"]
    result = model.getProductById(id)

    return jsonify(result)


@app.route('/buyProductById', methods=["POST"])
def buyProductById():
    productId = request.form["id"]
    paymentType = request.form["paymentType"]
    clientUsername = request.form["client"]
    numerOfProducts = request.form["quantity"]
Example #19
import argparse
import torchfile
import torch
from Model import Model
from Layers import Linear, ReLU

argument = argparse.ArgumentParser()
argument.add_argument("-config")
argument.add_argument("-i")
argument.add_argument("-og")
argument.add_argument("-o")
argument.add_argument("-ow")
argument.add_argument("-ob")
argument.add_argument("-ig")
parser = argument.parse_args()

myModel = Model()

# remove occurrences of '\n' in all strings
with open(parser.config) as f:
    arr = f.readlines()
    # print("ARR",arr)
    num_layers = int(arr[0].replace('\n', ''))
    i = 1
    # print("NUM",num_layers)
    while (num_layers > 0):
        arr[i] = arr[i].replace('\n', '')
        v = arr[i].split(' ')
        # print("V0",v[0])
        # print("LAST",v[0][-1])
        if (v[0] == "relu"):
            myModel.addLayer(ReLU())
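        # (Hedged sketch, not part of the original snippet, which is cut off
        # here) a plausible continuation: assume a "linear <in> <out>" config
        # line maps to the imported Linear layer, and advance the loop counters
        # so the while loop terminates.
        elif v[0] == "linear":
            myModel.addLayer(Linear(int(v[1]), int(v[2])))
        num_layers -= 1
        i += 1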
Example #20
 def __init__(self, view):
     self._view = view
     self._model = Model()
Example #21
    prec_multi, recall_multi = s[5] * 1. / s[3], s[5] * 1. / s[2]
    f1_multi = 2 * prec_multi * recall_multi / (prec_multi + recall_multi)
    return [loss_bi, loss_multi, f1_bi, f1_multi]    
    
map_relations = {}
data_train = load_data('data/STAC/train.json', map_relations)
data_test = load_data('data/STAC/test.json', map_relations)
vocab, embed = build_vocab(data_train)
print('Dataset sizes: %d/%d' % (len(data_train), len(data_test)))
model_dir, log_dir = FLAGS.prefix + '_model', FLAGS.prefix + '_log'

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with sess.as_default():
    model = Model(sess, FLAGS, embed, data_train)

    global_step = tf.Variable(0, name='global_step', trainable=False)
    global_step_inc_op = global_step.assign(global_step + 1)    
    epoch = tf.Variable(0, name='epoch', trainable=False)
    epoch_inc_op = epoch.assign(epoch + 1)

    saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep=None, pad_step_number=True)
    
    summary_list = ['loss_bi', 'loss_multi', 'f1_bi', 'f1_multi']
    summary_num = len(summary_list)
    len_output_feed = 6

    if FLAGS.train:
        if tf.train.get_checkpoint_state(model_dir):
            print('Reading model parameters from %s' % model_dir)
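            # (Hedged sketch, not part of the original snippet, which is cut
            # off here) the usual TF1 pattern: restore the latest checkpoint,
            # otherwise initialize fresh variables.
            saver.restore(sess, tf.train.latest_checkpoint(model_dir))
        else:
            sess.run(tf.global_variables_initializer())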
Example #22
                    self._client = Client(self.username, ip, port)
                    self._client.start(self._model.add_output)
                    self._client.listen(self.receive)
                except Exception:
                    self._model.add_err_output("Error while connecting to the server.")

            elif command == "help":
                self._model.add_output("Possible commands:\t:open (username)@(ip):(port)\n\t\tUsed to open connection")

            elif command == "close":
                self._client.disconnect(self._model.add_output)
                self._client = None
                self.username = ""
            elif command == "save"
                
            else:
                self._model.add_err_output("Command not found.")

        else:
            self._client.send_message(args)
            self._model.add_message(self.username, args)

    def receive(self, username, message):
        self._model.add_message(username, message)


if __name__ == '__main__':
    Controller(Model())
    Gtk.main()
Example #23
 def runMStreamIter(self, sampleNo):
     ParametersStr = ""
     model = Model(self.K, self.KIncrement, self.V, self.iterNum, self.alpha, self.beta, self.dataset,
                   ParametersStr, sampleNo, self.wordsInTopicNum)
     model.intialize(self.documentSet)
     model.gibbsSamplingIter(self.documentSet, self.iterNum, self.sampleNum, self.outputPath, self.wordList)
Example #24
from Model import Model
from Model import train
from Model import test
from load import load_file
from Model import accuracy
from extract_triplets import all_triplets

from noggin import create_plot

#only for jupyter notebook
plotter, fig, ax = create_plot(metrics=["loss", "accuracy"])

#getting model, model params, data
learning_rate=0.1
model = Model(512, 50)
num_epochs = 1
batch_size = 32
margin = 0.1
path = r'data\resnet18_features.pkl'
triplets = load_file(r'data\triplets')

#training the model!!
train(model, num_epochs, margin, triplets, learning_rate=learning_rate, batch_size=batch_size)




Example #25
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
vocab_file = '../Data/Dataset.dict.pkl'

if __name__ == '__main__':
    options = parser.parse_args()
    if options.toy:
        test_dataset = Dataset(data_type='test', length=100)
    else:
        test_dataset = Dataset(data_type='test')
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=options.bs,
                             shuffle=False,
                             collate_fn=batch_collect,
                             drop_last=True)
    model = Model(options)
    model = model.to(device)
    saved_state = torch.load(options.sustain, map_location=device)
    model.load_state_dict(saved_state)
    print('Already Load Pre Train Model')
    dic = {}
    print('Load Vocab File')
    with open(vocab_file, 'rb') as f:
        dict_data = pickle.load(f)
        for t in tqdm(dict_data):
            token, index, _, _ = t
            dic[index] = token
    dic[options.total_words] = 'PAD'
    generation = infer(options, model, device, test_loader, 5, dic)
    print('It is time to save generation sentences')
    with open('../data/generation.pkl', 'wb') as f:
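        # (Hedged completion, not part of the original snippet, which is cut
        # off here) presumably the generated sentences are pickled to disk:
        pickle.dump(generation, f)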
Example #26
import numpy as np
import matplotlib.pyplot as plt

from Model import Model

l = [1, 20, 20, 20, 1]
m = Model("bvp", l, 1.0, 50000)

epoches = 0
batch_size = 10
while True:
    epoches = epoches + 1
    m.train({
        m.varIn: np.array([[0], [1]]),
        m.varOut: np.array([[1], [0]]),
        m.varAux: np.random.uniform(0.0, 1.0, batch_size).reshape(-1, 1),
    })
    c = m.sess.run(
        m.loss, {
            m.varIn: np.array([[0], [1]]),
            m.varOut: np.array([[1], [0]]),
            m.varAux: np.random.uniform(0.0, 1.0, 10).reshape(-1, 1),
        })
    if c < 0.005:
        print("Converge with Error =", c)
        break
    elif epoches > 100:
        print("Fail to converge.")
        break
    else:
        print("Error =", c)
Example #27
import os

if __name__ == '__main__':

    # training and validation error collector
    ec = ErrorCollector()

    X, C, X_tst, C_tst = load_isolet()
    #print("X shape: ", X.shape[0])
    #print("X shape: ", X.shape[1])

    # Parameters and model
    epochs = 5
    #learning_rate = 0.001
    learning_rate = 0.01
    model = Model(n_in=X.shape[1], n_hidden=300, n_out=26, n_layer=1)
    batch_size = 40

    # setup logging
    log_file_name = 'logs' + os.sep + 'Log' + '_ep' + str(
        epochs) + '_hidu' + str(model.n_hidden) + '_hidl' + str(
            model.n_layer) + '_lr' + str(learning_rate) + '.log'
    logging.basicConfig(filename=log_file_name, level=logging.INFO)

    # Batch Normalize
    bn = BatchNormalizer(X, C, batch_size=batch_size, shuffle=True)
    train_batches = bn.getBatches(X, C, is_validation=False)
    test_batches = bn.getBatches(X_tst, C_tst, test=True)

    print('examples to train: ', train_batches.examples_train.shape)
Example #28
def main(_):
    # TODO: do not pass source label in target mode (it's not needed!)
    """Main function for Deep-Reconstruction Classification Network - DRCN"""
    tf.reset_default_graph()
    # Load source and target data set
    source_size = 32
    target_size = 32
    if FLAGS.source == 'mnist':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnist(FLAGS.channel_size, False)
    elif FLAGS.source == 'mnistm':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.source == 'svhn':
        (x_train_s, y_train_s), (x_test_s, y_test_s) = dp.load_svhn(FLAGS.channel_size, False)
    else:
        sys.exit('For the source set you have to choose one of [svhn, mnist, mnistm]!')

    if FLAGS.target == 'mnist':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnist(FLAGS.channel_size, False)
    elif FLAGS.target == 'mnistm':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_mnistm(FLAGS.channel_size)
    elif FLAGS.target == 'svhn':
        (x_train_t, y_train_t), (x_test_t, y_test_t) = dp.load_svhn(FLAGS.channel_size, False)
    else:
        sys.exit('For the target set you have to choose one of [svhn, mnist, mnistm]!')

    # Create data placeholders.
    placeholder_x_s = tf.placeholder(tf.float32, shape=[None, source_size, source_size, FLAGS.channel_size])
    placeholder_y_s = tf.placeholder(tf.int32, shape=[None])
    placeholder_x_t = tf.placeholder(tf.float32, shape=[None, target_size, target_size, FLAGS.channel_size])
    placeholder_y_t = tf.placeholder(tf.int32, shape=[None])
    placeholder_training = tf.placeholder_with_default(tf.constant(True), shape=[])
    ds_source, ds_target = create_dataset(placeholder_x_s, placeholder_y_s, placeholder_x_t, placeholder_y_t)

    iterator = tf.data.Iterator.from_structure(ds_source.output_types, ds_source.output_shapes)
    x, y = iterator.get_next()

    # Init model
    drcn = Model(FLAGS.opt)
    drcn.train_source(x, y, placeholder_training)
    if FLAGS.source_only.lower() == 'false':
        drcn.train_target(x, y)

    source_iterator = iterator.make_initializer(ds_source)
    target_iterator = iterator.make_initializer(ds_target)

    # Configs
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Stats
    source_acc_train = []
    source_acc_test = []
    target_acc_train = []
    target_acc_test = []
    source_loss_train = []
    target_loss_train = []

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if tf.train.latest_checkpoint(FLAGS.model_dir) is not None:
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_dir))
        for epoch in range(FLAGS.total_epochs):
            print('Epoch: ', epoch)
            if FLAGS.source_only.lower() == 'false':
                sess.run(target_iterator, feed_dict={placeholder_x_t: x_train_t, placeholder_y_t: y_train_t})
                i = 0
                total_loss = 0
                total_acc = 0
                try:
                    with tqdm(total=len(x_train_t)) as pbar:
                        while True:
                            _, out_loss, source_acc = sess.run([drcn.optimize_reconstruction, drcn.rec_loss,
                                                               drcn.target_class_acc],
                                                               feed_dict={placeholder_training: False})
                            i += 1
                            total_loss += out_loss
                            total_acc += source_acc
                            pbar.update(FLAGS.batch_size)
                            # pbar.write(str(source_acc))
                except tf.errors.OutOfRangeError:
                    print('Done with train target epoch.')
                print(total_acc / i)
                print(total_loss / i)
                target_acc_train.append((epoch, total_acc/float(i)))
                target_loss_train.append((epoch, total_loss/float(i)))
            sess.run(target_iterator, feed_dict={placeholder_x_t: x_test_t, placeholder_y_t: y_test_t})
            i = 0
            total_loss = 0
            total_acc = 0
            try:
                with tqdm(total=len(x_test_t)) as pbar:
                    while True:
                        out_loss, source_acc = sess.run([drcn.source_class_loss, drcn.source_class_acc],
                                                        feed_dict={placeholder_training: False})
                        i += 1
                        total_loss += out_loss
                        total_acc += source_acc
                        pbar.update(FLAGS.batch_size)
            except tf.errors.OutOfRangeError:
                print('Done with evaluation target epoch.')
            print(total_acc / i)
            print(total_loss / i)
            target_acc_test.append((epoch, total_acc / float(i)))
            sess.run(source_iterator, feed_dict={placeholder_x_s: x_train_s, placeholder_y_s: y_train_s})
            i = 0
            total_loss = 0
            total_acc = 0
            try:
                with tqdm(total=len(x_train_s)) as pbar:
                    while True:
                        _, out_loss, source_acc = sess.run([drcn.optimize_class, drcn.source_class_loss,
                                                            drcn.source_class_acc])
                        i += 1
                        total_loss += out_loss
                        total_acc += source_acc
                        pbar.update(FLAGS.batch_size)
            except tf.errors.OutOfRangeError:
                print('Done with source train epoch.')
            print(total_acc/i)
            print(total_loss/i)
            source_acc_train.append((epoch, total_acc/float(i)))
            source_loss_train.append((epoch, total_loss/float(i)))
            sess.run(source_iterator, feed_dict={placeholder_x_s: x_test_s, placeholder_y_s: y_test_s})
            i = 0
            total_loss = 0
            total_acc = 0
            try:
                with tqdm(total=len(x_test_s)) as pbar:
                    while True:
                        out_loss, source_acc = sess.run([drcn.source_class_loss, drcn.source_class_acc],
                                                        feed_dict={placeholder_training: False})
                        i += 1
                        total_loss += out_loss
                        total_acc += source_acc
                        pbar.update(FLAGS.batch_size)
            except tf.errors.OutOfRangeError:
                print('Done with evaluation source epoch.')
            print(total_acc / i)
            print(total_loss / i)
            source_acc_test.append((epoch, total_acc/float(i)))

            saver.save(sess, FLAGS.model_dir, global_step=epoch)
    # Save stats for visualization
    with open(os.path.join(FLAGS.model_dir, 'stats.pkl'), 'wb') as f:
        pickle.dump({'source_acc_train': source_acc_train, 'source_acc_test': source_acc_test,
                     'target_acc_train': target_acc_train, 'target_acc_test': target_acc_test,
                     'source_loss_train': source_loss_train, 'target_loss_train': target_loss_train}, f)
Example #29
train = Dataset(config.filename_train)
config.vocab_words = load_vocab(config.filename_words)
config.vocab_tags = load_vocab(config.filename_tags)
config.nwords = len(config.vocab_words)
config.ntags = len(config.vocab_tags)
config.processing_word = get_processing_word(config,
                                             config.vocab_words,
                                             lowercase=True)
config.processing_tag = get_processing_word(config,
                                            config.vocab_tags,
                                            lowercase=False,
                                            allow_unk=False)
config.embeddings = (get_trimmed_glove_vectors(config.filename_trimmed)
                     if config.use_pretrained else None)

model = Model(config)
model.build()
dev = Dataset(config.filename_dev, config.processing_word,
              config.processing_tag, config.max_iter)
train = Dataset(config.filename_train, config.processing_word,
                config.processing_tag, config.max_iter)

# train model
model.train(train, dev)

# testmodel
testmodel = Model(config)
testmodel.build()
testmodel.restore_session(config.dir_model)

test = Dataset(config.filename_test, config.processing_word,
Example #30


class FilePaths:
	"filenames and paths to data"
	fnCharList = '../model/charList.txt'
	fnAccuracy = '../model/accuracy.txt'
	fnTrain = '../data/'
	fnInfer = '../data/test2.png'
	fnCorpus = '../data/corpus.txt'




decoderType = DecoderType.BestPath
model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore=True)
		


names = [os.path.basename(x) for x in glob.glob(inputfolder+'/*.jpg')]

for fno in range(0,len(names)):
    
    print("Processing for "+names[fno])
    
    img = cv2.imread(inputfolder+'/'+names[fno])

    # rotate the image if it has more rows than columns
    if img.shape[0]>img.shape[1]:
        img=cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)