Code example #1
def plot_results():
    retr_results = data.pickle_from_file('output/retr_context_10')
    retr_results = {'Degree (window)': [0.22290305491606582,
                       0.2239404496699994,
                       0.22351183191703122,
                       0.22293583927185456,
                       0.2216027852882311,
                       0.22232860216650002,
                       0.22230162622918934,
                       0.22287683186704185,
                       0.22266252053221772,
                       0.22237418794670616],
                 'PageRank (window)': [0.21772129149181993,
                              0.21884861149427587,
                              0.22063142971295358,
                              0.21893898241891538,
                              0.21973766615441442,
                              0.22054672890564322,
                              0.22099589130745473,
                              0.22129686184085004,
                              0.22148942934157456,
                              0.22147928890310792],
                    'PageRank (sentence)': [0.22056586008664569]*10,
                    'Degree (sentence)': [0.21784622825075944]*10}
                    #~ #'PageRank (sentence)':[0.223649757653]*10,
                    #~ #'Weighted degree (sentence)':[0.223449136101]*10}
    pp.pprint(retr_results)
    plotter.plot(range(1, 11), retr_results, 'retrieval score', 'n, context size', '', [1, 10, .216, .225], legend_place="lower right")
Code example #2
def plot(self):
    logger.info("Plotting %s", timestamp)
    config = read_config()
    self.menu.message = "Plotting..."
    plotter.plot(
        config.get('directories', 'sessions'),
        config.get('directories', 'images'),
        timestamp)
    self.menu.current_item = self._root_item
Code example #3
def TEST_customize_02():
    f = FuncHeaviside(xlimit=0.3)

    # One can customize the final function as follows (in this example,
    # the reverse of the Heaviside step)
    def myfunc(x):
        y = 1 - f(x)
        return y

    from plotter import plot
    plot(myfunc)
Code example #4
def TEST_customize_01():
    f = FuncStiffPulse(xlimit=0.3, stiffness=40, nbPeriods=20)

    # One can customize the final function as follows (in this example,
    # a linear transform)
    def myfunc(x):
        y = 5*f(x) + 2
        return y

    from plotter import plot
    plot(myfunc, step=0.001)
Code example #5
def match(img1, img2, K, distort):
	#plotter.plot2(img1)
	timeStart = time.time()
	img1 = cv2.undistort(img1,K,distort)
	img2 = cv2.undistort(img2,K,distort)
	pts1, pts2, des1, des2 = correspondences.getCorrespondences(img1,img2)
	print("Time for correspondences: "+str(time.time()-timeStart))
	if len(pts1) < 8:
		print("ERROR: <8 correspondences")
		return
	timeStart = time.time()
	F, mask = computervision.findFundamentalMatrix(K,pts1,pts2)
	print("Time for Fundamental: "+str(time.time()-timeStart))
	#F = F/np.linalg.norm(F)
	pts1 = pts1[mask.ravel()==1]
	pts2 = pts2[mask.ravel()==1]
	des1 = [des1[ind] for ind, x in enumerate(mask) if x==1]
	des2 = [des2[ind] for ind, x in enumerate(mask) if x==1]
	timeStart = time.time()
	if(pts1.shape[0]>8):
		F = computervision.nonlinearOptimizationFundamental(F,K,pts1,pts2)
	print("Time for nonlinearOptimizationFundamental: "+str(time.time()-timeStart))

	testFundamentalMatrix(F,pts1,pts2)
	lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
	lines1 = lines1.reshape(-1,3)
	img5 = drawlines(img1,img2,lines1,pts1,pts2,K)
	lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
	lines2 = lines2.reshape(-1,3)
	img3 = drawlines(img2,img1,lines2,pts2,pts1,K)
	
	p1, p2, X, rot, trans = computervision.getCameraMatrix(F,K,pts1,pts2)
	print("Translation: "+str(trans))
	plotter.plot(rot,trans,X,img1,pts1)
	reprojectionError(X,pts1,pts2,p1,p2)
	cubePosition = X[0]#np.array([0,0,50,1])#
	projectPoint(img5,X,p1)
	projectPoint(img3,X,p2)
	projectCube(img5,p1,cubePosition)
	projectCube(img3,p2,cubePosition)
	vis = showCombinedImgs(img5,img3)
	drawCorrespondences(vis,pts1,pts2)
	cv2.imshow("test", vis)
	return patch.makePatches(pts1,pts2,X,des1,des2)
Code example #6
File: classifier2.py Project: name3anad/SVMDemo
def train_classifier(self, n_folds=10, learning_curve=True,
                     start_size=4000, inc=1000):
    """Train the classifier; optionally display a learning curve."""
    print("training")
    size = len(self.X_training)
    train_accs = []
    cv_accs = []
    sizes = []
    if learning_curve:
        size = start_size
    while size <= len(self.X_training):
        print(size)
        sizes.append(size)
        X = self.X_training[:size]
        Y = self.Y_training[:size]
        classifier, train_acc, cv_acc = self.cross_validation(X, Y)
        train_accs.append(train_acc)
        cv_accs.append(cv_acc)
        size += inc
    self.classifier = classifier
    if learning_curve:
        plot(sizes, ys=[train_accs, cv_accs], legs=['Training', 'CV'])
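The plot(sizes, ys=..., legs=...) helper called here (and again in code example #21) is not part of the snippet. Below is a minimal sketch of such a learning-curve helper, assuming a plain matplotlib backend; the name and signature are inferred from the call sites, so treat it as an illustrative stand-in rather than the project's actual plotter module:

import matplotlib.pyplot as plt

def plot(xs, ys, legs):
    # Draw one labeled accuracy curve per series in `ys` against the
    # shared x values (training set sizes in the example above).
    for y, leg in zip(ys, legs):
        plt.plot(xs, y, label=leg)
    plt.xlabel('training set size')
    plt.ylabel('accuracy')
    plt.legend(loc='lower right')
    plt.show()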
Code example #7
File: readfile.py Project: kundor/gpsdata
def index(req, n_file, n_type):
    """Read GPS observation data and show summary or TEC plot.

    This function is called for mod_python in a web server (apache).
    """
    database = '/web/gps/data/'
    filedate = time.strptime(n_file[5:11], '%y%m%d')
    url = os.path.join(database, n_file[0:4], str(filedate.tm_year), n_file[7:9],
                       'rinex', n_file)
    # Parse RINEX file
    dat = read_file(url)
    if n_type.lower() == 'summary':
        # Return summary info
        req.content_type = "text/plain"
        req.write(dat.header_info())
    elif n_type.lower() == 'tec':
        # Return TEC plot
        fig = plotter.plot(dat, 'TEC', 'web')
        with io.BytesIO() as f:
            fig.savefig(f, format='png')
            req.content_type = "image/png"
            req.write(f.getvalue())
Code example #8
File: robust_td.py Project: MrFive5555/Byrd-TD
def main(args):
    mc = args.mc
    asbes, aces = [], []
    for i in range(mc):
        # configs
        if args.network in ('h1b1', 'h3b1', 'h2b1', 'h4b1'):
            env = make_env('simple_spread_custom_local_12')
            attacker = [1,3,5,7,9,11]
            b = 1  
        elif args.network == 'h3b2':
            env = make_env('simple_spread_custom_local_18')
            attacker = [1,2,4,5,7,8,10,11,13,14,16,17]
            b = 2
        
        try:
            w = np.loadtxt(args.network+'.txt')
        except:
            if args.network == 'complete':
                env = make_env('simple_spread_custom_local_9')
                w = np.ones((env.n,env.n))
                attacker = [0,1]
                b = len(attacker) 
            elif args.network == 'renyi':
                env = make_env('simple_spread_custom_local_9')
                _, _, attacker, connectivity = renyigraph.Renyi(9, 0.7, 0.2)
                while len(attacker) > 1 or len(attacker) < 1:
                    _, _, attacker, connectivity = renyigraph.Renyi(9, 0.7, 0.2)
                w = connectivity
                b = int(len(attacker))
            elif args.network == 'sparse':
                env = make_env('simple_spread_custom_local_18')
                attacker = []
                while len(attacker) > 1 or len(attacker) < 1:
                    _, _, attacker, connectivity = renyigraph.Renyi(18, 0.4, 0.05)
                w = connectivity
                b = int(len(attacker))
            else:
                raise Exception("Implemented networks: h1b1, h2b1, h3b1, h4b1, h3b2, renyi, sparse, complete.")
        mode = args.attack
        
        watch = np.setdiff1d(np.arange(env.n), attacker)
        if len(args.vars) == 0:
            trainers = [
                trainer_mean(env, 'Mean', watch=watch), 
                trainer_trim(env, 'Trim', b=b, watch=watch),
                trainer_mean(env, 'Mean_att', attacker, mode),
                trainer_trim(env, 'Trim_att', attacker, mode, b),
                trainer_local(env, 'Local', attacker, watch=watch)
            ]
        else:
            trainers = sum([[
                trainer_mean(env, 'Mean', watch=watch, rew_var=var), 
                trainer_trim(env, 'Trim', b=b, watch=watch, rew_var=var),
                trainer_mean(env, 'Mean_att', attacker, mode, rew_var=var),
                trainer_trim(env, 'Trim_att', attacker, mode, b, rew_var=var),
                trainer_local(env, 'Local', attacker, watch=watch, rew_var=var)
            ] for var in args.vars], start = [])
        network = decent_network(env, w, trainers)
        asbe, ace = network.train(args.epoch, args.horizon, args.lr, 
                                  args.lam, args.diminish, args.render)
        asbes.append(asbe)
        aces.append(ace)
        
    # write result
    sbe_name = 'sbe' + '_' + args.network \
                + '_a' + str(args.attack) \
                + '_lam' + str(args.lam) \
                + (('_vars') if len(args.vars) > 0 else '') \
                + '.pkl'
    ce_name = 'ce' + '_' + args.network \
                + '_a' + str(args.attack) \
                + '_lam' + str(args.lam) \
                + (('_vars') if len(args.vars) > 0 else '') \
                + '.pkl'
    dump_file_in_cache(sbe_name, asbes)
    dump_file_in_cache(ce_name, aces)
    
    if args.plot:
        plotter.plot(args)
Code example #9
    # plotter.plot(lonGrid, latGrid, std_dev, out_file=out_file, levels=np.arange(0,6), extend='max')
    # out_file = os.environ['variab_dir']+'/eulerian_storm_track/model/%s.%s.png'%(os.environ['CASENAME'], season)
    # plotter.plot(lonGrid, latGrid, std_dev, out_file=out_file, levels=np.arange(0,6), extend='max')

    season = 'djf'
    model_std_dev = est.model_std_dev(eddies,
                                      int(os.environ['FIRSTYR']),
                                      time,
                                      season=season)
    out_file = os.environ[
        'variab_dir'] + '/eulerian_storm_track/model/PS/%s.%s.ps' % (
            os.environ['CASENAME'], season.upper())
    plotter.plot(lonGrid,
                 latGrid,
                 model_std_dev,
                 out_file=out_file,
                 title='%s (%s to %s)' %
                 (season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']),
                 levels=np.arange(0, 6),
                 extend='max')
    out_file = os.environ[
        'variab_dir'] + '/eulerian_storm_track/model/%s.%s.png' % (
            os.environ['CASENAME'], season.upper())
    plotter.plot(lonGrid,
                 latGrid,
                 model_std_dev,
                 out_file=out_file,
                 title='%s (%s to %s)' %
                 (season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']),
                 levels=np.arange(0, 6),
                 extend='max')
Code example #10
def train(current_model, target_model, env, optimizer, args):
    start_time = datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
    log = {}
    setup_logger('{}_log'.format(args.env),
                 r'{}{}_{}_log'.format(args.log_dir, args.env, start_time))
    log['{}_log'.format(args.env)] = logging.getLogger('{}_log'.format(
        args.env))
    d_args = vars(args)
    for k in d_args.keys():
        log['{}_log'.format(args.env)].info('{0}: {1}'.format(k, d_args[k]))

    #linearly decrease epsilon from 1 to epsilon end over epsilon decay steps
    epsilon_start = 1.0
    epsilon_by_frame = lambda frame_idx: (
        args.exp_epsilon_end + max(0, 1 - frame_idx / args.exp_epsilon_decay) *
        (epsilon_start - args.exp_epsilon_end))

    if args.gpu_id >= 0:
        device = torch.device('cuda:{}'.format(args.gpu_id))
    else:
        device = torch.device('cpu')

    replay_buffer = ReplayBuffer(args.buffer_size, device)
    start = time.time()

    losses = []
    standard_losses = []
    worst_case_losses = []
    all_rewards = []
    worst_case_rewards = []
    #initialize as a large negative number to save first
    max_score = -10000
    episode_reward = 0

    state = env.reset()
    state = torch.FloatTensor(state).unsqueeze(0).to(device)

    for frame_idx in range(1, args.total_frames + 1):
        action_epsilon = epsilon_by_frame(frame_idx)
        action = current_model.act(state, action_epsilon)
        next_state, reward, done, info = env.step(action)
        episode_reward += reward

        next_state = torch.FloatTensor(next_state).unsqueeze(0).to(device)
        action = torch.LongTensor([action]).to(device)
        #scale rewards between -1 and 1
        reward = torch.clamp(torch.FloatTensor([reward]).to(device),
                             min=-1,
                             max=1)
        done = torch.FloatTensor([info]).to(device)

        replay_buffer.push(state, action, reward, next_state, done)

        state = next_state

        if done and not info:
            state = env.reset()
            state = torch.FloatTensor(state).unsqueeze(0).to(device)

        elif info:
            state = env.reset()
            state = torch.FloatTensor(state).unsqueeze(0).to(device)
            all_rewards.append(episode_reward)
            episode_reward = 0
            plot(frame_idx, all_rewards, losses, standard_losses,
                 worst_case_losses, args, start_time)

            if frame_idx % 5 == 0:
                test_reward = test(args, current_model, env, device)
                log['{}_log'.format(args.env)].info(
                    "Steps: {}, Test reward: {}, Time taken: {:.3f}s".format(
                        frame_idx, test_reward,
                        time.time() - start))
                if args.save_max and test_reward >= max_score:
                    max_score = test_reward
                    state_to_save = current_model.state_dict()
                    torch.save(
                        state_to_save,
                        '{}{}_{}_best.pt'.format(args.save_model_dir, args.env,
                                                 start_time))

        if frame_idx > args.replay_initial and frame_idx % (
                args.batch_size / args.updates_per_frame) == 0:

            lin_coeff = min(
                1, (frame_idx + 1) /
                max(args.attack_epsilon_schedule, args.total_frames))

            attack_epsilon = lin_coeff * args.attack_epsilon_end
            kappa = (1 - lin_coeff) * 1 + lin_coeff * args.kappa_end

            data = replay_buffer.sample(args.batch_size)
            if args.robust:
                loss, standard_loss, worst_case_loss = _compute_robust_loss(
                    current_model, target_model, data, attack_epsilon, kappa,
                    args.gamma, device, args)
            else:
                loss, standard_loss, worst_case_loss = _compute_loss(
                    current_model, target_model, data, args.gamma, device)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses.append(loss.data.item())
            standard_losses.append(standard_loss.data.item())
            worst_case_losses.append(worst_case_loss.data.item())

        if frame_idx % (1000 *
                        (args.batch_size / args.updates_per_frame)) == 0:
            target_model.load_state_dict(current_model.state_dict())
    #save final model
    state_to_save = current_model.state_dict()
    torch.save(
        state_to_save, '{}{}_{}_last.pt'.format(args.save_model_dir, args.env,
                                                start_time))

    log['{}_log'.format(args.env)].info("Done in {:.3f}s".format(time.time() -
                                                                 start))
Code example #11
import pickle

import pandas as pd

from plotter import plot, prepare_plots
from EntityClass import EntityClass

fil = 'PastTrades.h5'
minutes = 91  # largest time before NaNs appear

data = pd.read_hdf(fil, key='date')

# Load previously labeled buy/sell times if the pickle exists; start fresh otherwise.
try:
    with open("outputs.p", "rb") as f:
        times = pickle.load(f)
except (OSError, pickle.UnpicklingError):
    times = dict()


prepare_plots([EntityClass('temp')])
for column in data:
    if column not in times.keys():
        plot(list(data[column][:minutes]), 'temp')
        sell = int(input("enter sell time:"))
        if sell == -1:
            times[column] = [0] * minutes
            continue
        buy = int(input("enter buy time:"))
    
        times[column] = [0] * minutes
        times[column][sell] = -1
        times[column][buy] = 1
pickle.dump(times, open("outputs.p", "wb"))
Code example #12
File: options.py Project: lambdalisue/Histogramy
def plot(data, model, criterions, opts):
    import plotter
    return plotter.plot(data, model, criterions, opts)
Code example #13
"""
Example on how to use plotter class for real time visualization
"""

from plotter import plot, subplot, series
import time
import numpy as np

size = 4
plt = plot(columns=size, name="Realtime plot")

colors = ["r", "b"]

for i in range(size**2):
    sub = subplot(name="Subplot " + str(i))
    plt.add(sub)
    for j in range(2):
        seq = series(x=np.linspace(0, 100, 30), y=np.linspace(0, 100, 30), name="curve")
        seq.color = colors[j]
        sub.add(seq)

plt.compile()

idx = 100.0

while True:

    for sub in plt.subplots:
        mul = 0
        for seq in sub.sequences:
            seq.y = (seq.y + 1) % idx + mul
Code example #14
def TEST_FuncPorte():
    f = FuncPorte(xinf=0.3, xsup=0.4)
    from plotter import plot
    plot(f)
Code example #15
File: runnerRastrigin.py Project: hwarlley/ai-fer
                          elitism=elitism,
                          populationSize=populationSize,
                          mutationProbability=mutationProbability,
                          mutationScale=mutationScale,
                          numIterations=numIterations,
                          errorTreshold=errorTreshold)

    print_every = 100  # Print the output every this many iterations
    plot_every = 15000  # Plot the actual vs estimated functions every this many iterations

    # emulated do-while loop
    done = False
    while not done:
        done, iteration, best = GA.step()

        if iteration % print_every == 0:
            print("Error at iteration %d = %f" % (iteration,
                                                  errorClosure(best)))

        if iteration % plot_every == 0:
            NN.setWeights(best)
            plotter.plot(X_train, y_train, NN.output(X_train))
            plotter.plot_surface(X_train, y_train, NN)

    print "Training done, running on test set"
    NN.setWeights(best)

    print "Error on test set: ", NN.forwardStep(X_test, y_test)
    plotter.plot(X_test, y_test, NN.output(X_test))
    plotter.plot_surface(X_test, y_test, NN)
Code example #16
def TEST_FuncStiffPulse():
    f = FuncStiffPulse(xlimit=0.3, stiffness=50, nbPeriods=15)
    from plotter import plot
    plot(f, step=0.001)
Code example #17
def TEST_FuncHeaviside():
    f = FuncHeaviside(xlimit=0.3)
    from plotter import plot
    plot(f)
Code example #18
def TEST_FuncCosinus():
    f = FuncCosinus(nbPeriods=20)
    from plotter import plot
    plot(f, step=0.001)
Code example #19
def TEST_FuncStiffExp():
    f = FuncStiffExp(xlimit=0.3, stiffness=20.)
    from plotter import plot
    plot(f)
Code example #20
def TEST_FuncChapeau():
    f = FuncChapeau(xlimit=0.3)
    from plotter import plot
    plot(f)
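The TEST_Func* examples above all hand a callable to plot, sometimes with a sampling step. The plotter module itself is not included in these snippets; here is a minimal sketch of what such a helper could look like, assuming the functions are defined on [0, 1] and that matplotlib is available (both are assumptions, not taken from the source):

import numpy as np
import matplotlib.pyplot as plt

def plot(f, step=0.01):
    # Sample the callable pointwise on [0, 1] at the given step and
    # draw the resulting curve; f may not be vectorized (e.g., FuncHeaviside).
    x = np.arange(0.0, 1.0 + step, step)
    y = [f(v) for v in x]
    plt.plot(x, y)
    plt.show()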
Code example #21
File: classifier.py Project: emanlodovice/SVMDemo
print('Training set size: ' + str(len(training_set)))
print('Test set size: ' + str(len(test_set)))

i = 1000
sizes = []
training_acc = []
cv_acc = []
while i < len(training_set):
    t = training_set[:i]
    c, training, cv = cross_validation(t)
    sizes.append(i)
    training_acc.append(training)
    cv_acc.append(cv)
    i += 1000
    # print i
plot(sizes, ys=[training_acc, cv_acc], legs=['Training', 'CV'])
# Get accuracy
lr_classifier = cross_validation(training_set)[0]
lr_accuracy = classify.accuracy(lr_classifier, test_set)
# print "Classifier accuracy on test: " + str(lr_accuracy)
lr_accuracy_training = classify.accuracy(lr_classifier, training_set)
# print "Classifier accuracy on training: " + str(lr_accuracy_training)

diagnose(lr_classifier, test_set)

sentence = 'a'
while True:
    sentence = input('Test me: ')
    if sentence == '':
        break
    features = feature_extractor(sentence)
Code example #22
File: run.py Project: zestree/easy21
import sys, os
from env.easy21 import Easy21Env
import agent as agent
import plotter

_PATH_ = os.path.dirname(os.path.dirname(__file__))


if _PATH_ not in sys.path:
    sys.path.append(_PATH_)

if __name__ == "__main__":
    env = Easy21Env()

    #state = env.getInitState()
    #state, reward = env.step(state, 'stick')
    #print state, reward


    Q, policy = agent.mc_control_epsilon_greedy(env, 1000)

    plotter.plot(Q)
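plotter.plot(Q) presumably visualizes the action-value function learned by Monte Carlo control. A hedged sketch of one way to do that, assuming Q maps ((dealer, player), action) keys to values (the key layout and action names are assumptions): render V(s) = max_a Q(s, a) as a surface over dealer card and player sum.

import numpy as np
import matplotlib.pyplot as plt

def plot(Q):
    # V(s) = max over actions of Q(s, a), drawn as a 3D surface.
    dealer = np.arange(1, 11)    # dealer showing: 1-10
    player = np.arange(1, 22)    # player sum: 1-21
    V = np.array([[max(Q.get(((d, p), a), 0.0) for a in ('hit', 'stick'))
                   for d in dealer] for p in player])
    D, P = np.meshgrid(dealer, player)
    ax = plt.figure().add_subplot(projection='3d')
    ax.plot_surface(D, P, V)
    ax.set_xlabel('dealer showing')
    ax.set_ylabel('player sum')
    ax.set_zlabel('V(s)')
    plt.show()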
Code example #23
#! /usr/bin/env python

import pcap_parser
import power_parser
import sys
import plotter


if __name__ == '__main__':
    experiment_timestamp = sys.argv[1].split('/')[-1]
    pcap_parser.parse(experiment_timestamp)
    power_parser.parse(experiment_timestamp)
    plotter.plot(experiment_timestamp)
Code example #24
def train(config, args):
    if not os.path.exists("./results"):
        os.makedirs("./results")

    if args.save_model and not os.path.exists("./models"):
        os.makedirs("./models")

    import pybulletgym
    warnings.filterwarnings("ignore")
    eps_bounds = args.reacher_epsilon_bounds  # just aliasing with shorter variable name
    utils_object = utils.GeneralUtils(args)

    if args.tune_run:
        if args.prioritized_replay:
            args.alpha = float(config["alpha"])
            args.beta = float(config["beta"])
            args.discount = float(config.get("discount", args.discount))
            args.tau = float(config.get("tau", args.tau))
        elif args.custom_env and args.use_hindsight:
            eps_bounds = [
                float(config["epsilons"][0]),
                float(config["epsilons"][1])
            ]
            args.seed = int(config["seed"])
        else:
            args.discount = float(config.get("discount", args.discount))
            args.tau = float(config.get("tau", args.tau))

    if args.custom_env:
        gym.envs.register(
            id='OurReacher-v0',
            entry_point='our_reacher_env:OurReacherEnv',
            max_episode_steps=50,
            reward_threshold=100.0,
        )

        # this is assuming we only use epsilon for custom env or fetch reach, where episode tsteps is 50 !!!!
        max_episode_steps = 50

        # retrieve epsilon range
        [a, b] = eps_bounds
        epsilons = utils_object.epsilon_calc(a, b, max_episode_steps)
        env = gym.make('OurReacher-v0', epsilon=epsilons[0], render=False)
    else:
        env = gym.make(args.env)

    if utils_object.fetch_reach and utils_object.args.fetch_reach_dense:
        env.env.reward_type = "dense"

    # Set seeds
    env.seed(int(args.seed))
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if utils_object.fetch_reach:
        state_dim = env.reset()["observation"].shape[0]
    else:
        state_dim = env.observation_space.shape[0]
    if args.use_hindsight:  # include both current state and goal state
        if args.custom_env:
            state_dim += 2  # reacher nonsense; goal = (x, y)
        elif utils_object.fetch_reach:
            state_dim += 3  # include fetchreach goal state (x,y,z position)
        else:
            state_dim *= 2

    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args.discount,
        "tau": args.tau,
    }

    # Initialize policy
    if args.policy == "TD3":
        # Target policy smoothing is scaled wrt the action scale
        kwargs["policy_noise"] = args.policy_noise * max_action
        kwargs["noise_clip"] = args.noise_clip * max_action
        kwargs["policy_freq"] = args.policy_freq
        kwargs["prioritized_replay"] = args.prioritized_replay
        kwargs["use_rank"] = args.use_rank
        kwargs["use_hindsight"] = args.use_hindsight

        policy = TD3.TD3(**kwargs)
    elif args.policy == "OurDDPG":
        policy = OurDDPG.DDPG(**kwargs)
    elif args.policy == "DDPG":
        policy = DDPG.DDPG(**kwargs)

    exp_descriptors = [
        args.policy, 'CustomReacher' if args.custom_env else args.env,
        f"{'rank' if args.use_rank else 'proportional'}PER" if
        args.prioritized_replay else '', 'HER' if args.use_hindsight else '',
        f"{args.decay_type}decay-eps{f'{eps_bounds[0]}-{eps_bounds[1]}' if eps_bounds[0] != eps_bounds[1] else f'{eps_bounds[0]}'}"
        if args.custom_env else "", f"k{args.k}",
        datetime.now().strftime('%Y%m%d%H%M')
    ]
    if args.tune_run:
        # fudgy: assumes tune_run for non-HER experiments
        exp_descriptors = [
            args.policy, 'CustomReacher' if args.custom_env else args.env,
            f"{'rank' if args.use_rank else 'proportional'}PER"
            if args.prioritized_replay else '', f"tau{args.tau}",
            f"discount{args.discount}",
            f"alpha{args.alpha}" if args.prioritized_replay else '',
            f"beta{args.beta}" if args.prioritized_replay else '',
            f"k{args.k}",
            datetime.now().strftime('%Y%m%d%H%M')
        ]

    exp_descriptors = [x for x in exp_descriptors if len(x) > 0]
    file_name = "_".join(exp_descriptors)

    if args.load_model != "":
        policy_file = file_name if args.load_model == "default" else args.load_model
        policy.load(f"./models/{policy_file}")

    if args.prioritized_replay:
        replay_buffer = utils.PrioritizedReplayBuffer(state_dim,
                                                      action_dim,
                                                      args.max_timesteps,
                                                      args.start_timesteps,
                                                      alpha=args.alpha,
                                                      beta=args.beta)
    else:
        replay_buffer = utils.ReplayBuffer(state_dim, action_dim)

    # Evaluate untrained policy
    evaluations = [
        eval_policy(policy, args.env, args.seed, utils_object=utils_object)
    ]

    state, done = env.reset(), False

    original_episode_reward = 0
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0

    trajectory = []

    for t in range(int(args.max_timesteps)):

        episode_timesteps += 1
        x, goal = utils_object.compute_x_goal(state, env)

        # Select action randomly or according to policy
        if t < args.start_timesteps:
            action = env.action_space.sample()
        else:
            action = (policy.select_action(np.array(x)) + np.random.normal(
                0, max_action * args.expl_noise, size=action_dim)).clip(
                    -max_action, max_action)

        # Perform action
        next_state, reward, done, _ = env.step(action)
        done_bool = float(
            done) if episode_timesteps < env._max_episode_steps else 0

        if args.use_hindsight:
            if utils_object.fetch_reach:
                goal = state["desired_goal"]
                next_x = np.concatenate(
                    [np.array(next_state["observation"]), goal])
            else:
                # env.set_goal(goal)
                next_x = np.concatenate([np.array(next_state), goal])
        elif utils_object.fetch_reach:
            next_x = np.array(next_state["observation"])
        else:
            next_x = next_state

        # Store data in replay buffer
        if not args.use_hindsight:
            replay_buffer.add(x, action, next_x, reward, done_bool)

        trajectory.append((state, action, next_state, reward, done_bool))

        state = next_state
        episode_reward += reward
        if args.custom_env:
            original_episode_reward += env.original_rewards

        # Train agent after collecting sufficient data
        if t >= args.start_timesteps:
            policy.train(replay_buffer, args.batch_size)

        if done:
            if args.use_hindsight:
                replay_buffer.add_hindsight(
                    trajectory,
                    goal,
                    env,
                    k=args.k,
                    fetch_reach=utils_object.fetch_reach)
            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(
                f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f} Original Reward: {original_episode_reward:.3f}"
            )
            # Reset environment
            state, done = env.reset(), False
            episode_reward = 0
            original_episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
            if args.custom_env:
                epsilon = epsilons[episode_num]
                env.set_epsilon(epsilon)

            trajectory = []

        # Evaluate episode
        if (t + 1) % args.eval_freq == 0:
            evaled_policy = eval_policy(policy,
                                        args.env,
                                        args.seed,
                                        utils_object=utils_object)
            evaluations.append(evaled_policy)
            np.save(f"./results/{file_name}", evaluations)
            if args.save_model:
                policy.save(f"./models/{file_name}")
            if args.plot:
                plotter.plot(file_name, args.custom_env)
            if args.tune_run:
                tune.report(episode_reward_mean=evaled_policy[0])
Code example #25
File: functions.py Project: Sunqia/salome-med-1
def TEST_FuncChapeau():
    f = FuncChapeau(xlimit=0.3)
    from plotter import plot
    plot(f)
Code example #26
def TEST_FuncLagrange():
    points = {0.: 5, 0.2: 10, 0.9: 10, 0.6: 21, 1: 8}
    f = FuncLagrange(points)
    from plotter import plot
    plot(f)
Code example #27
File: dpa.py Project: ranjanabhb/AlgorithmicThinking
        #update the number of nodes
        self._num_nodes += 1
        return new_node_neighbors

def dpa(n, m, dpa_trial):
    if m > n:
        return

    graph = project.make_complete_graph(m)

    print("Before DPA: " + str(len(graph)))
    for i in range(m, n):        
        new_neighbors = dpa_trial.run_trial(m)
        #print(str(i) + " -> " + str(new_neighbors))
        graph[i] = new_neighbors
    print("After DPA: " + str(len(graph)))

    return graph

print("Time: " + str(datetime.datetime.now().time()))
dpa_trial = DPATrial(13)
print("Time: " + str(datetime.datetime.now().time()))
dpa_graph = dpa(28000, 13, dpa_trial)
print("Time: " + str(datetime.datetime.now().time()))
normalized_dpa_graph = application.normalize_graph(len(dpa_graph),
                                                   project.in_degree_distribution(dpa_graph))
application.verify_normalized_distribution(normalized_dpa_graph)
plotter.plot(normalized_dpa_graph.keys(),
             normalized_dpa_graph.values())
print("Time: " + str(datetime.datetime.now().time()))
Code example #28
File: destination.py Project: rahulsekar/sargam
def done(self):
    plotter.plot(self.pcss, self.times)
Code example #29
def TEST_FuncConique():
    f = FuncConique(xlimit=0.3)
    from plotter import plot
    plot(f)
Code example #30
def driver(inputdir,
           outputdir,
           datadir,
           plotdir,
           preddir,
           trainflag,
           validflag,
           testflag,
           normalize,
           fmean,
           fstdev,
           scale,
           fmin,
           fmax,
           scalelims,
           fsize,
           rmse_file,
           r2_file,
           inD,
           outD,
           ilog,
           olog,
           TFRfile,
           batch_size,
           ncores,
           buffer_size,
           gridsearch,
           architectures,
           layers,
           lay_params,
           activations,
           act_params,
           nodes,
           lengthscale,
           max_lr,
           clr_mode,
           clr_steps,
           epochs,
           patience,
           weight_file,
           resume,
           plot_cases,
           fxvals,
           xlabel,
           ylabel,
           filters=None,
           filt2um=1.):
    """
    Driver function to handle model training and evaluation.

    Inputs
    ------
    inputdir   : string. Path/to/directory of inputs.
    outputdir  : string. Path/to/directory of outputs.
    datadir    : string. Path/to/directory of data.
    plotdir    : string. Path/to/directory of plots.
    preddir    : string. Path/to/directory of predictions.
    trainflag  : bool.   Determines whether to train    the NN model.
    validflag  : bool.   Determines whether to validate the NN model.
    testflag   : bool.   Determines whether to test     the NN model.
    normalize  : bool.   Determines whether to normalize the data.
    fmean      : string. Path/to/file of mean  of training data.
    fstdev     : string. Path/to/file of stdev of training data.
    scale      : bool.   Determines whether to scale the data.
    fmin       : string. Path/to/file of minima of training data.
    fmax       : string. Path/to/file of maxima of training data.
    scalelims  : list, floats. [min, max] of range of scaled data.
    fsize      : string. File (relative to `inputdir`) holding the data set
                         sizes [num_train, num_valid, num_test].
    rmse_file  : string. Prefix for savefiles for RMSE calculations.
    r2_file    : string. Prefix for savefiles for R2 calculations.
    inD        : int.    Dimensionality of the input  data.
    outD       : int.    Dimensionality of the output data.
    ilog       : bool.   Determines whether to take the log10 of input data.
    olog       : bool.   Determines whether to take the log10 of output data.
    TFRfile    : string. Prefix for TFRecords files.
    batch_size : int.    Size of batches for training/validating/testing.
    ncores     : int.    Number of cores to use to load data cases.
    buffer_size: int.    Number of data cases to pre-load in memory.
    gridsearch : bool.   Determines whether to perform a grid search over 
                         `architectures`.
    architectures: list. Model architectures to consider.
    layers     : list, str.  Types of hidden layers.
    lay_params : list, ints. Parameters for the layer type 
                             E.g., kernel size
    activations: list, str.  Activation functions for each hidden layer.
    act_params : list, floats. Parameters for the activation functions.
    nodes      : list, ints. For layers with nodes, number of nodes per layer.
    lengthscale: float.  Minimum learning rate.
    max_lr     : float.  Maximum learning rate.
    clr_mode   : string. Sets the cyclical learning rate function.
    clr_steps  : int.    Number of steps per cycle of the learning rate.
    epochs     : int.    Max number of iterations through dataset for training.
    patience   : int.    If no model improvement after `patience` epochs, 
                         halts training.
    weight_file: string. Path/to/file where NN weights are saved.
    resume     : bool.   Determines whether to resume training.
    plot_cases : list, ints. Cases from test set to plot.
    fxvals     : string. Path/to/file of X-axis values to correspond to 
                         predicted Y values.
    xlabel     : string. X-axis label for plotting.
    ylabel     : string. Y-axis label for plotting.
    filters    : list, strings.  Paths/to/filter files.  Default: None
                         If specified, will compute RMSE/R2 stats over the 
                         integrated filter bandpasses.
    filt2um    : float.  Conversion factor for filter file wavelengths to 
                         microns.  Default: 1.0
    """
    # Get file names, calculate number of cases per file
    print('Loading files & calculating total number of batches...')

    try:
        datsize = np.load(inputdir + fsize)
        num_train = datsize[0]
        num_valid = datsize[1]
        num_test = datsize[2]
    except:
        ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
        fvalid = glob.glob(datadir + 'valid' + os.sep + '*.npy')
        ftest = glob.glob(datadir + 'test' + os.sep + '*.npy')
        num_train = U.data_set_size(ftrain, ncores)
        num_valid = U.data_set_size(fvalid, ncores)
        num_test = U.data_set_size(ftest, ncores)
        np.save(inputdir + fsize, np.array([num_train, num_valid, num_test]))
        del ftrain, fvalid, ftest

    print("Data set sizes")
    print("Training   data:", num_train)
    print("Validation data:", num_valid)
    print("Testing    data:", num_test)
    print("Total      data:", num_train + num_valid + num_test)

    train_batches = num_train // batch_size
    valid_batches = num_valid // batch_size
    test_batches = num_test // batch_size

    # Update `clr_steps`
    if clr_steps == "range test":
        clr_steps = train_batches * epochs
        rng_test = True
    else:
        clr_steps = train_batches * int(clr_steps)
        rng_test = False

    # Get mean/stdev for normalizing
    if normalize:
        print('\nNormalizing the data...')
        try:
            mean = np.load(inputdir + fmean)
            stdev = np.load(inputdir + fstdev)
        except:
            print("Calculating the mean and standard deviation of the data " +\
                  "using Welford's method.")
            # Compute stats
            ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
            mean, stdev, datmin, datmax = S.mean_stdev(ftrain, inD, ilog, olog)
            np.save(inputdir + fmean, mean)
            np.save(inputdir + fstdev, stdev)
            np.save(inputdir + fmin, datmin)
            np.save(inputdir + fmax, datmax)
            del datmin, datmax, ftrain
        print("mean :", mean)
        print("stdev:", stdev)
        # Slice desired indices
        x_mean, y_mean = mean[:inD], mean[inD:]
        x_std, y_std = stdev[:inD], stdev[inD:]
        # Memory cleanup -- no longer need full mean/stdev arrays
        del mean, stdev
    else:
        x_mean = 0.
        x_std = 1.
        y_mean = 0.
        y_std = 1.

    if olog:
        # To properly calculate RMSE & R2 for log-scaled output
        try:
            y_mean_delog = np.load(inputdir +
                                   fmean.replace(".npy", "_delog.npy"))
        except:
            mean_delog = S.mean_stdev(ftrain, inD, ilog, False)[0]
            y_mean_delog = mean_delog[inD:]
            np.save(inputdir + fmean.replace(".npy", "_delog.npy"),
                    y_mean_delog)
    else:
        y_mean_delog = y_mean

    # Get min/max values for scaling
    if scale:
        print('\nScaling the data...')
        try:
            datmin = np.load(inputdir + fmin)
            datmax = np.load(inputdir + fmax)
        except:
            ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
            mean, stdev, datmin, datmax = S.mean_stdev(ftrain, inD, ilog, olog)
            np.save(inputdir + fmean, mean)
            np.save(inputdir + fstdev, stdev)
            np.save(inputdir + fmin, datmin)
            np.save(inputdir + fmax, datmax)
            del mean, stdev, ftrain
        print("min  :", datmin)
        print("max  :", datmax)
        # Slice desired indices
        x_min, y_min = datmin[:inD], datmin[inD:]
        x_max, y_max = datmax[:inD], datmax[inD:]
        # Memory cleanup -- no longer need min/max arrays
        del datmin, datmax

        # Normalize min/max values
        if normalize:
            x_min = U.normalize(x_min, x_mean, x_std)
            x_max = U.normalize(x_max, x_mean, x_std)
            y_min = U.normalize(y_min, y_mean, y_std)
            y_max = U.normalize(y_max, y_mean, y_std)
    else:
        x_min = 0.
        x_max = 1.
        y_min = 0.
        y_max = 1.
        scalelims = [0., 1.]

    # Get TFRecord file names
    print('\nLoading TFRecords file names...')
    TFRpath = inputdir + 'TFRecords' + os.sep + TFRfile
    ftrain_TFR = glob.glob(TFRpath + 'train*.tfrecords')
    fvalid_TFR = glob.glob(TFRpath + 'valid*.tfrecords')
    ftest_TFR = glob.glob(TFRpath + 'test*.tfrecords')

    if len(ftrain_TFR) == 0 or len(fvalid_TFR) == 0 or len(ftest_TFR) == 0:
        # Doesn't exist -- make them
        print("\nSome TFRecords files do not exist yet.")
        ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
        fvalid = glob.glob(datadir + 'valid' + os.sep + '*.npy')
        ftest = glob.glob(datadir + 'test' + os.sep + '*.npy')
        if len(ftrain_TFR) == 0:
            print("Making TFRecords for training data...")
            U.make_TFRecord(
                inputdir + 'TFRecords' + os.sep + TFRfile + 'train.tfrecords',
                ftrain, inD, ilog, olog, batch_size, train_batches)
        if len(fvalid_TFR) == 0:
            print("\nMaking TFRecords for validation data...")
            U.make_TFRecord(
                inputdir + 'TFRecords' + os.sep + TFRfile + 'valid.tfrecords',
                fvalid, inD, ilog, olog, batch_size, valid_batches)
        if len(ftest_TFR) == 0:
            print("\nMaking TFRecords for test data...")
            U.make_TFRecord(
                inputdir + 'TFRecords' + os.sep + TFRfile + 'test.tfrecords',
                ftest, inD, ilog, olog, batch_size, test_batches)
        print("\nTFRecords creation complete.")
        # Free memory
        del ftrain, fvalid, ftest
        # Get TFR file names for real this time
        ftrain_TFR = glob.glob(TFRpath + 'train*.tfrecords')
        fvalid_TFR = glob.glob(TFRpath + 'valid*.tfrecords')
        ftest_TFR = glob.glob(TFRpath + 'test*.tfrecords')

    # Load the xvals
    xvals = np.load(fxvals)

    # Perform grid search
    if gridsearch:
        # Train a model for each architecture, w/ unique directories
        print("\nPerforming a grid search.\n")
        maxlen = 0
        for i, arch in enumerate(architectures):
            if len(arch) > maxlen:
                maxlen = len(arch)
            archdir = os.path.join(outputdir, arch, '')
            wsplit = weight_file.rsplit(os.sep, 1)[1].rsplit('.', 1)
            wfile = ''.join([archdir, wsplit[0], '_', arch, '.', wsplit[1]])
            U.make_dir(archdir)
            nn = NNModel(ftrain_TFR,
                         fvalid_TFR,
                         ftest_TFR,
                         inD,
                         outD,
                         olog,
                         x_mean,
                         x_std,
                         y_mean,
                         y_std,
                         x_min,
                         x_max,
                         y_min,
                         y_max,
                         scalelims,
                         ncores,
                         buffer_size,
                         batch_size,
                         [train_batches, valid_batches, test_batches],
                         layers[i],
                         lay_params[i],
                         activations[i],
                         act_params[i],
                         nodes[i],
                         lengthscale,
                         max_lr,
                         clr_mode,
                         clr_steps,
                         wfile,
                         stop_file='./STOP',
                         train_flag=True,
                         shuffle=True)
            nn.train(train_batches, valid_batches, epochs, patience)
            P.loss(nn, archdir)
        # Print/save out the minimum validation loss for each architecture
        minvl = np.ones(len(architectures)) * np.inf
        print('Grid search summary')
        print('-------------------')
        with open(outputdir + 'gridsearch.txt', 'w') as foo:
            foo.write('Grid search summary\n')
            foo.write('-------------------\n')
        for i, arch in enumerate(architectures):
            archdir = os.path.join(outputdir, arch, '')
            history = np.load(archdir + 'history.npz')
            minvl[i] = np.amin(history['val_loss'])
            print(arch.ljust(maxlen, ' ') + ': ' + str(minvl[i]))
            with open(outputdir + 'gridsearch.txt', 'a') as foo:
                foo.write(arch.ljust(maxlen, ' ') + ': ' \
                          + str(minvl[i]) + '\n')
        return

    # Train a model
    if trainflag:
        print('\nBeginning model training.\n')
        nn = NNModel(ftrain_TFR,
                     fvalid_TFR,
                     ftest_TFR,
                     inD,
                     outD,
                     olog,
                     x_mean,
                     x_std,
                     y_mean,
                     y_std,
                     x_min,
                     x_max,
                     y_min,
                     y_max,
                     scalelims,
                     ncores,
                     buffer_size,
                     batch_size, [train_batches, valid_batches, test_batches],
                     layers,
                     lay_params,
                     activations,
                     act_params,
                     nodes,
                     lengthscale,
                     max_lr,
                     clr_mode,
                     clr_steps,
                     weight_file,
                     stop_file='./STOP',
                     train_flag=True,
                     shuffle=True,
                     resume=resume)
        nn.train(train_batches, valid_batches, epochs, patience)
        # Plot the loss
        P.loss(nn, plotdir)

    # Call new model with shuffle=False
    nn = NNModel(ftrain_TFR,
                 fvalid_TFR,
                 ftest_TFR,
                 inD,
                 outD,
                 olog,
                 x_mean,
                 x_std,
                 y_mean,
                 y_std,
                 x_min,
                 x_max,
                 y_min,
                 y_max,
                 scalelims,
                 ncores,
                 buffer_size,
                 batch_size, [train_batches, valid_batches, test_batches],
                 layers,
                 lay_params,
                 activations,
                 act_params,
                 nodes,
                 lengthscale,
                 max_lr,
                 clr_mode,
                 clr_steps,
                 weight_file,
                 stop_file='./STOP',
                 train_flag=False,
                 shuffle=False,
                 resume=False)
    nn.model.load_weights(weight_file)  # Load the model
    # Save in ONNX format
    #onnx_model = keras2onnx.convert_keras(nn.model)
    #onnx.save_model(onnx_model, nn.weight_file.rsplit('.', 1)[0] + '.onnx')

    # Validate model
    if (validflag or trainflag) and not rng_test:
        print('\nValidating the model...\n')
        # Y values
        print('  Predicting...')
        fvalpred = nn.Yeval('pred',
                            'valid',
                            preddir,
                            denorm=(normalize == False and scale == False))
        fvalpred = glob.glob(fvalpred + '*')

        print('  Loading the true Y values...')
        fvaltrue = nn.Yeval('true',
                            'valid',
                            preddir,
                            denorm=(normalize == False and scale == False))
        fvaltrue = glob.glob(fvaltrue + '*')
        ### RMSE & R2
        print('\n Calculating RMSE & R2...')
        if not normalize and not scale:
            val_stats = S.rmse_r2(fvalpred,
                                  fvaltrue,
                                  y_mean,
                                  olog=olog,
                                  y_mean_delog=y_mean_delog,
                                  x_vals=xvals,
                                  filters=filters,
                                  filt2um=filt2um)
        else:
            val_stats = S.rmse_r2(fvalpred, fvaltrue, y_mean, y_std, y_min,
                                  y_max, scalelims, olog, y_mean_delog, xvals,
                                  filters, filt2um)
        # RMSE
        if np.any(val_stats[0] != -1) and np.any(val_stats[1] != -1):
            print('  Normalized RMSE       : ', val_stats[0])
            print('  Mean normalized RMSE  : ', np.mean(val_stats[0]))
            print('  Denormalized RMSE     : ', val_stats[1])
            print('  Mean denormalized RMSE: ', np.mean(val_stats[1]))
            np.savez(outputdir + rmse_file + '_val_norm.npz',
                     rmse=val_stats[0],
                     rmse_mean=np.mean(val_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = True
        elif np.any(val_stats[0] != -1):
            print('  RMSE     : ', val_stats[0])
            print('  Mean RMSE: ', np.mean(val_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = False
        elif np.any(val_stats[1] != -1):
            print('  RMSE     : ', val_stats[1])
            print('  Mean RMSE: ', np.mean(val_stats[1]))
            saveRMSEnorm = False
            saveRMSEdenorm = True
        else:
            print("  No files passed in to compute RMSE.")
            saveRMSEnorm = False
            saveRMSEdenorm = False
        if saveRMSEnorm:
            P.plot(''.join([plotdir, rmse_file, '_val_norm.png']), xvals,
                   val_stats[0], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_val_norm.npz',
                     rmse=val_stats[0],
                     rmse_mean=np.mean(val_stats[0]))
        if saveRMSEdenorm:
            P.plot(''.join([plotdir, rmse_file, '_val_denorm.png']), xvals,
                   val_stats[1], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_val_denorm.npz',
                     rmse=val_stats[1],
                     rmse_mean=np.mean(val_stats[1]))
        # R2
        if np.any(val_stats[2] != -1) and np.any(val_stats[3] != -1):
            print('  Normalized R2       : ', val_stats[2])
            print('  Mean normalized R2  : ', np.mean(val_stats[2]))
            print('  Denormalized R2     : ', val_stats[3])
            print('  Mean denormalized R2: ', np.mean(val_stats[3]))
            saveR2norm = True
            saveR2denorm = True
        elif np.any(val_stats[2] != -1):
            print('  R2     : ', val_stats[2])
            print('  Mean R2: ', np.mean(val_stats[2]))
            saveR2norm = True
            saveR2denorm = False
        elif np.any(val_stats[3] != -1):
            print('  R2     : ', val_stats[3])
            print('  Mean R2: ', np.mean(val_stats[3]))
            saveR2norm = False
            saveR2denorm = True
        else:
            print("  No files passed in to compute R2.")
            saveR2norm = False
            saveR2denorm = False
        if saveR2norm:
            P.plot(''.join([plotdir, r2_file, '_val_norm.png']), xvals,
                   val_stats[2], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_val_norm.npz',
                     r2=val_stats[2],
                     r2_mean=np.mean(val_stats[2]))
        if saveR2denorm:
            P.plot(''.join([plotdir, r2_file, '_val_denorm.png']), xvals,
                   val_stats[3], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_val_denorm.npz',
                     r2=val_stats[3],
                     r2_mean=np.mean(val_stats[3]))

    # Evaluate model on test set
    if testflag and not rng_test:
        print('\nTesting the model...\n')
        # Y values
        print('  Predicting...')
        ftestpred = nn.Yeval('pred',
                             'test',
                             preddir,
                             denorm=(normalize == False and scale == False))
        ftestpred = glob.glob(ftestpred + '*')

        print('  Loading the true Y values...')
        ftesttrue = nn.Yeval('true',
                             'test',
                             preddir,
                             denorm=(normalize == False and scale == False))
        ftesttrue = glob.glob(ftesttrue + '*')
        ### RMSE & R2
        print('\n Calculating RMSE & R2...')
        if not normalize and not scale:
            test_stats = S.rmse_r2(ftestpred,
                                   ftesttrue,
                                   y_mean,
                                   olog=olog,
                                   y_mean_delog=y_mean_delog,
                                   x_vals=xvals,
                                   filters=filters,
                                   filt2um=filt2um)
        else:
            test_stats = S.rmse_r2(ftestpred, ftesttrue, y_mean, y_std, y_min,
                                   y_max, scalelims, olog, y_mean_delog, xvals,
                                   filters, filt2um)
        # RMSE
        if np.any(test_stats[0] != -1) and np.any(test_stats[1] != -1):
            print('  Normalized RMSE       : ', test_stats[0])
            print('  Mean normalized RMSE  : ', np.mean(test_stats[0]))
            print('  Denormalized RMSE     : ', test_stats[1])
            print('  Mean denormalized RMSE: ', np.mean(test_stats[1]))
            np.savez(outputdir + rmse_file + '_val_norm.npz',
                     rmse=test_stats[0],
                     rmse_mean=np.mean(test_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = True
        elif np.any(test_stats[0] != -1):
            print('  RMSE     : ', test_stats[0])
            print('  Mean RMSE: ', np.mean(test_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = False
        elif np.any(test_stats[1] != -1):
            print('  RMSE     : ', test_stats[1])
            print('  Mean RMSE: ', np.mean(test_stats[1]))
            saveRMSEnorm = False
            saveRMSEdenorm = True
        else:
            print("  No files passed in to compute RMSE.")
            saveRMSEnorm = False
            saveRMSEdenorm = False
        if saveRMSEnorm:
            P.plot(''.join([plotdir, rmse_file, '_test_norm.png']), xvals,
                   test_stats[0], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_test_norm.npz',
                     rmse=test_stats[0],
                     rmse_mean=np.mean(test_stats[0]))
        if saveRMSEdenorm:
            P.plot(''.join([plotdir, rmse_file, '_test_denorm.png']), xvals,
                   test_stats[1], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_test_denorm.npz',
                     rmse=test_stats[1],
                     rmse_mean=np.mean(test_stats[1]))
        # R2
        if np.any(test_stats[2] != -1) and np.any(test_stats[3] != -1):
            print('  Normalized R2       : ', test_stats[2])
            print('  Mean normalized R2  : ', np.mean(test_stats[2]))
            print('  Denormalized R2     : ', test_stats[3])
            print('  Mean denormalized R2: ', np.mean(test_stats[3]))
            saveR2norm = True
            saveR2denorm = True
        elif np.any(test_stats[2] != -1):
            print('  R2     : ', test_stats[2])
            print('  Mean R2: ', np.mean(test_stats[2]))
            saveR2norm = True
            saveR2denorm = False
        elif np.any(test_stats[3] != -1):
            print('  R2     : ', test_stats[3])
            print('  Mean R2: ', np.mean(test_stats[3]))
            saveR2norm = False
            saveR2denorm = True
        else:
            print("  No files passed in to compute R2.")
            saveR2norm = False
            saveR2denorm = False
        if saveR2norm:
            P.plot(''.join([plotdir, r2_file, '_test_norm.png']), xvals,
                   test_stats[2], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_test_norm.npz',
                     r2=test_stats[2],
                     r2_mean=np.mean(test_stats[2]))
        if saveR2denorm:
            P.plot(''.join([plotdir, r2_file, '_test_denorm.png']), xvals,
                   test_stats[3], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_test_denorm.npz',
                     r2=test_stats[3],
                     r2_mean=np.mean(test_stats[3]))

    # Plot requested cases
    if not rng_test:
        predfoo = sorted(glob.glob(preddir + 'test' + os.sep + 'pred*'))
        truefoo = sorted(glob.glob(preddir + 'test' + os.sep + 'true*'))
        if len(predfoo) > 0 and len(truefoo) > 0:
            print("\nPlotting the requested cases...")
            nplot = 0
            for v in plot_cases:
                fname = plotdir + 'spec' + str(v) + '_pred-vs-true.png'
                predspec = np.load(predfoo[v // batch_size])[v % batch_size]
                predspec = U.denormalize(
                    U.descale(predspec, y_min, y_max, scalelims), y_mean,
                    y_std)
                truespec = np.load(truefoo[v // batch_size])[v % batch_size]
                truespec = U.denormalize(
                    U.descale(truespec, y_min, y_max, scalelims), y_mean,
                    y_std)
                if olog:
                    # undo the log10 scaling applied to the flagged outputs
                    predspec[olog] = 10**predspec[olog]
                    truespec[olog] = 10**truespec[olog]
                P.plot_spec(fname, predspec, truespec, xvals, xlabel, ylabel)
                nplot += 1
                print("  Plot " + str(nplot) + "/" + str(len(plot_cases)),
                      end='\r')
            print("")
        else:
            raise Exception("No predictions found in " + preddir + "test.")

    return
Code Example #31
0
def starter():  #user initialisation

    cs.main()
    a = input("Do you want to plot?\n y/n\nUser input: ")  # help user plot graphs
    if a == 'y':
        b = input(
            "Plotting options available (enter the number, e.g. 1 or 2):\n1.Single plot\n2.Multiplot\nUser input: "
        )
        if b == '2':
            multiplot()
        else:
            go = 'y'
            while go == 'y':
                flname = gf.outputarray[int(
                    input(
                        "Enter the output file number you want to plot\nUser input: "
                    )) - 1]
                meterno = int(input("Enter the meter number\nUser input: "))
                title = input("Enter the title of the plot\nUser input: ")
                plot(flname, meterno, title)
                go = input("Do you want to plot again?\n y/n\nUser input: ")
    appa = input(
        "Do you want to compute any functions?\npress y/n\nUser input: ")
    if appa == 'y':
        # Map each menu number to the corresponding computation.
        functions = {1: avg, 2: ripple, 3: rms, 4: thd, 5: moving_avg, 6: peak}
        while appa == 'y':
            rval = int(
                input(
                    "Functions available:\n1.Average\n2.Ripple\n3.RMS\n4.THD\n5.Moving Average\n6.Peak\n7.Optimizing an external variable or expression\nUser input: "
                ))  # compute values
            if rval in functions:
                num = int(input("Enter file output number\nUser input: ")) - 1
                rval1 = int(input("Enter the meter number\nUser input: ")) - 1
                print(functions[rval](num, rval1))
            elif rval == 7:
                posoffile = int(
                    input("Enter the variable list number\nUser input: "))
                ev.uservariable()
                print((gv.externalvariable[posoffile - 1]).value)
            appa = input("Do you want to compute any functions? press y/n\n")

    opt = input("Do you want to optimize?\n y/n\nUser input: ")
    if opt == 'y':
        # Initialise the result and log files.
        with open("feasible.txt", "w") as f:
            f.write(" The results are given below\n")
        with open("nondominanted_solutions.txt", "w") as f:
            f.write(" The results are given below\n")
        with open("searchlog.txt", "w") as f:
            f.write(" the logging procedures have started")
        feat = int(
            input(
                "Which of the following features do you want?\n 1.Optimization \n 2.Topology change and Optimization\nUser input: "
            ))
        if feat == 1:
            initalization()
        elif feat == 2:
            vctmain()
    return
Code Example #32
0
File: solve.py Project: ivision-ufba/segperson
import os
import numpy as np
import caffe

import surgery
import score
import plotter

try:  # setproctitle is optional; it only labels the process
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except ImportError:
    pass

weights = '../ilsvrc-nets/vgg16-fcn.caffemodel'

# init
caffe.set_device(0)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('../data/segperson/indices/val.txt', dtype=str)

acc = np.empty(75)
loss = np.empty(75)

for it in range(75):
    solver.step(4000)
    a, l = score.seg_tests(solver, False, val, layer='score')
    acc[it] = 100 * a
    loss[it] = l
    plotter.plot(acc, loss, it)
Code Example #33
0
whs[1:] = [np.random.randn(4 * hidden_size, 2 * hidden_size + 1)
           for _ in range(num_layers - 1)]  # fresh array per layer; list * n would alias one array
wy = np.random.randn(task.vocab_size, hidden_size + 1)
ws[:] = whs, wy
ws = ws * 1e-3
#ws[1] = wy

init_whs = lambda: whs - whs  #workaround: alternative to np.zeros_like(ws)
#init_wy = lambda: np.zeros_like(wy) #workaround: alternative to np.zeros_like(ws)
init_ws = lambda: ws - ws  #(init_whs(), init_wy())
init_hc = lambda n=batch_size: np.zeros((2, hidden_size, n))
init_hscs = lambda n=batch_size: np.zeros((2, num_layers, hidden_size, n))
optimizer = Adagrad(init_ws)
tr_loss, val_acc, n = {}, {}, 0
#ws, tr_loss, val_acc, n = restore(exp_name)

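# Training loop: forward/backward through the stacked LSTM plus an Adagrad update;
# every 10 batches, evaluate on validation data, sample text, plot, and checkpoint.
# (itertools, numpy, and the lstm_* helpers are assumed imported earlier in the file.)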
for i in itertools.count(n):
    xs, ys = task.next_batch()
    tr_loss[i], caches = lstm_forward(xs, ys, ws, init_hscs)
    dws = lstm_backward(caches, init_hscs, init_whs)
    ws = optimizer.update(ws, dws)

    if (i + 1) % 10 == 0:
        xs, ys = task.get_val_data()
        val_acc[i] = lstm_val(xs, ys, ws, init_hscs, task)
        text = lstm_sample(task.rand_x(), ws, init_hscs, task.vocab_size, task)
        print(f'Loss: {dict_mean(tr_loss)} \n {task.array_to_sen(text)}\n\n')
        print(f'Val acc: {val_acc[i]} Max: {dict_max(val_acc)}')
        plot(val_acc)
        save(exp_name, ws, tr_loss, val_acc, i)
Code Example #34
0
File: test.py Project: trtdevelopment/sumo
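# assumes the original file's star imports (old pylab API: load, meshgrid,
# griddata, figure, subplot, plot, ylim, xlabel, ylabel, legend, show);
# filename, flow, delayVAWE, and delayVANS are defined earlier in that file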
X = load(filename)

flowWEA = X[:, 2]
flowNSA = X[:, 3]
avgDelayWEA = X[:, 4]
avgDelayNSA = X[:, 5]

[X, Y] = meshgrid(range(300, 1300, 100), range(300, 1300, 100))

Z = griddata(flowWEA, flowNSA, avgDelayWEA, X, Y)
delayFCWE = Z[0]
Z = griddata(flowWEA, flowNSA, avgDelayNSA, X, Y)
delayFCNS = Z[0]


figure(figsize=(12, 6))
subplot(1, 2, 1)
plot(flow, delayFCWE, flow, delayVAWE)
ylim(0, 60)
xlabel("Flow")
ylabel("Average Delay WE")
legend(("FC", "VA"), loc='upper left')
subplot(1, 2, 2)
plot(flow, delayFCNS, flow, delayVANS)
ylim(0, 60)
xlabel("Flow")
ylabel("Average Delay NS")
legend(("FC", "VA"), loc='upper left')
show()
Code Example #35
0
#!/usr/bin/python2

import sys

sys.path.insert(1, '../')

import plotter as plt

p1 = plt.plot([1, 2, 3], [1, 3, 2],
              'r-o', [1, 2, 2.5, 3], [3, 2, 1.5, 1],
              'b-o',
              new=True,
              position=[2, 1, 1])
p2 = plt.plot([1, 2, 3], [1, 3, 2],
              'r-o', [1, 2, 3], [3, 2, 1],
              'b-o',
              new=True,
              position=[2, 1, 2])

#print p1._datapairs[0].clipPath()

plt.show()

#plt.plot([1,2,3],[1,3,2], '')
#plt.show()
#
#plt.show()
#
#plt.plot([1,2,3],[1,3,2], '')
#plt.plot([1,2,3,4],[1,3,2,4], '')
#plt.show()
Code Example #36
0
import client
import pandas as pd
import numpy as np
import plotter as p

df = pd.read_csv('./list.csv', index_col='symbol')
for index, item in df.iterrows():
    symbol = item.name
    equity = client.get_last(symbol)
    # normalize prices to [0, 1] using the overall low/high range
    norm_equity = equity.drop(['date', 'volume'], axis=1)
    min_equity = norm_equity['low'].min()
    max_equity = norm_equity['high'].max()
    norm_equity = (norm_equity - min_equity) / (max_equity - min_equity)

    norm_volume = equity.drop(['date', 'open', 'high', 'low', 'close'], axis=1)
    min_volume = norm_volume['volume'].min()
    max_volume = norm_volume['volume'].max()
    norm_volume = (norm_volume - min_volume) / (max_volume - min_volume)

    p.plot(symbol, norm_equity, norm_volume)

print("Done.")
Code Example #37
0
File: main.py Project: Christopher-Bradshaw/learning
import time
import numpy as np

from plotter import plot  # assumed source of the bare plot() used below


def naive_mandelbrot(width, height, img_max_x, img_max_y, max_iter, max_value):
    # reconstructed header; signature inferred from the call
    # naive_mandelbrot(256, 256, 4, 4, 100, 10) below
    arr = np.zeros((height, width))

    def complex_from_coords(real, imag):
        # Map pixel (real, imag) to a point in the complex plane, centred on
        # the origin and spanning img_max_x by img_max_y.
        return complex(
                (real - width/2)/width * img_max_x,
                (imag - height/2)/height * img_max_y,
        )


    def mandelbrot(c, max_iter, max_value):
        z = 0
        for k in range(max_iter):
            z = z*z + c
            if abs(z) > max_value:
                return k
        return max_iter

    for i in range(height):
        for j in range(width):
            arr[i, j] = mandelbrot(complex_from_coords(j, i), max_iter, max_value)

    return arr


start = time.time()
output = naive_mandelbrot(256, 256, 4, 4, 100, 10)
print("Naive took: {}".format(time.time() - start))

plot(output)
# arr = (255 * (arr.astype(np.float32) / np.max(arr))).astype(np.uint8)

# img = Image.fromarray(arr, "L")
# img.save('my.tiff')
# img.show()
Code Example #38
0
File: run.py Project: vymiyai/hexagonalizer
def to_pixels( point_dictionary, hexagon_points ):
	try:
		return [ point_dictionary[ point ] for point in hexagon_points ]
	except KeyError:
		return []

from PIL import Image, ImageDraw

import plotter

scale_factor = 4

image = Image.open( 'index.png' )
image = image.resize((image.width*scale_factor, image.height*scale_factor))
img_draw = ImageDraw.Draw(image)

points = plotter.plot(10, image.width, image.height)

points_in_pixels = []
hexagons = {}
for row, column in points.keys():
	# if the row number is even, an offset must be applied.
	point = (row, column)
	point_in_pixels = points[point]

	if (row % 2 != column % 2):
		# row is even.
		if (row % 2) == 0:
			if (column + 1) % 6 != 0:
				points_in_pixels.append( point_in_pixels )
			else:
				hexagons[ point ] = get_hexagon_points( point )
Code Example #39
0
batch_size = 128

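# Collect experience: act randomly for the first 1000 frames to seed the replay
# buffer, then follow the learned policy; update the networks once the buffer
# holds a full batch, and refresh the reward plot every 1000 frames.
# (env, policy_net, replay_buffer, update, rewards, frame_idx come from earlier in the file.)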
while frame_idx < max_frames:
    state = env.reset()
    episode_reward = 0

    for step in range(max_steps):
        if frame_idx > 1000:
            action = policy_net.get_action(state).detach()
            next_state, reward, done, _ = env.step(action.numpy())
        else:
            action = env.action_space.sample()
            next_state, reward, done, _ = env.step(action)

        replay_buffer.push(state, action, reward, next_state, done)

        state = next_state
        episode_reward += reward
        frame_idx += 1

        if len(replay_buffer) > batch_size:
            update(batch_size)

        if frame_idx % 1000 == 0:
            plot(frame_idx, rewards)

        if done:
            break

    rewards.append(episode_reward)
Code Example #40
0
            accuracy = accuracy_score(y_test, pred)
            acc.append(accuracy)
            preds.append([y_test, pred])
        return scores(np.asarray(acc)), np.asarray(preds)[-1]


#data = np.load('../data/binary_data.npy')
data = np.load('../data/condensed_time_series_data.npy')

train_x = data[:, :-2]
train_y = data[:, -1]

cls = Cluster_classifier(12, SVC)
scores, preds = cls.cv_func(10, train_x, train_y)
print("Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))
print(metrics.confusion_matrix(preds[0], preds[1]))
plotter.plot("../stats/___svm_clustering", 12, preds[0], preds[1])
print(scores.get())

cls = Cluster_classifier(12, RandomForestClassifier)
scores, preds = cls.cv_func(10, train_x, train_y)
print("Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))
plotter.plot("../stats/___rf_clustering", 12, preds[0], preds[1])
print(scores.get())

cls = Cluster_classifier(12, GaussianNB)
scores, preds = cls.cv_func(10, train_x, train_y)
print("Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))
plotter.plot("../stats/___nb_clustering", 12, preds[0], preds[1])
print(scores.get())
Code Example #41
0
File: main.py Project: poonsakn/binging-stonks
import logging

from integrations import fetch
from plotter import plot

logging.basicConfig(level=logging.INFO)


SHOWS = [
    "The Last Airbender",
    "Breaking Bad",
    "Better Call Saul",
    "Game of Thrones",
    "House of Cards",
    "Person of Interest",
    "The Office",
    "Dexter",
    "Scrubs",
    "Gravity Falls",
    "tt1355642",  # Fullmetal Alchemist: Brotherhood
    "Death Note",
    "Community",
    "tt1219024",  # Castle
]

for title in SHOWS:
    show = fetch(title)
    plot(show, save=True)
Code Example #42
0
File: readfile.py Project: kundor/gpsdata
def main():
    """Read GPS observation data, downloading, gunzipping, and uncompressing as necessary."""
    usage = sys.argv[0] + ' [-hvVgtGT] [-f FORMAT]'
    if 'plotter' in dir():
        usage = usage + ' [-i OBSERVATION]'
    usage = usage + ' <filename> [-o OUTPUT]'
    parser = OptionParser(description=main.__doc__, usage=usage)
    parser.add_option('-v', '--version', action='store_true',
                      help='Show version and quit')
    parser.add_option('-V', '--verbose', action='store_true',
                      help='Verbose operation')
#   parser.add_option('-p', '--pickle', action='store_true',
#                     help='Save parsed data as a pickle file (extension becomes .pkl)')
    parser.set_defaults(pickle=None) # TODO: fix pickling
    if 'plotter' in dir():
        parser.add_option('-i', '--image', action='store', metavar='OBSERVATION',
                          help='Plot given OBSERVATION for all satellites; '
                               'display unless -o given')
    else:
        parser.set_defaults(image=None)
    parser.add_option('-g', '--gunzip', action='store_const', const=1,
                      help='Force treatment as gzipped')
    parser.add_option('-u', '--uncompress', action='store_const', const=2, dest='gunzip',
                      help="Force treatment as compress'd")
    parser.add_option('-G', '--no-gunzip', action='store_const', const=0, dest='gunzip',
                      help='Do not gunzip or uncompress')
    parser.add_option('-t', '--tar', action='store_true',
                      help='Force treatment as tar file')
    parser.add_option('-T', '--notar', action='store_false', dest='tar',
                      help='Do not untar')
    parser.add_option('-f', '--format', action='store',
                      choices=['RINEX', 'CRINEX'],
                      help='Format of GPS observation file (default: by extension)')
    parser.add_option('-o', '--output', action='append',
                      help='File to save data in (must specify -i or -p)')
    (opts, args) = parser.parse_args()
    if opts.version:
        print('GPSData version', __ver__, 'supporting RINEX version',
              rinex.RNX_VER, 'and Compact RINEX version', rinex.CR_VER, '.')
    elif opts.image and opts.pickle:
        parser.error('Cannot output both a pickle and an image - sorry.')
    elif not args:
        parser.error('Filename or URL required.')
    else:
        try:
            parsed_data = [read_file(url, opts.format, opts.verbose,
                                     opts.gunzip, opts.tar) for url in args]
        except IOError as ioe:
            print(ioe)
            sys.exit(ioe.errno)
        if opts.output and opts.pickle:
            op = open(opts.output[0], 'wb')
            pickle.dump(parsed_data, op, pickle.HIGHEST_PROTOCOL)
            op.close()
        elif opts.output and opts.image:
            for data, out in zip(parsed_data, opts.output):
                # If there are more input files than output names, or vice
                # versa, we write out the lesser number (ignoring the rest)
                plotter.plot(data, opts.image, out)
        elif opts.pickle:
            op = open(args[0] + '.pkl', 'wb')
            pickle.dump(parsed_data, op, pickle.HIGHEST_PROTOCOL)
            op.close()
        elif opts.image:
            for data in parsed_data:
                plotter.plot(data, opts.image)
        for data in parsed_data:
            if data is not None:
                print(data.header_info())
Code Example #43
0
File: functions.py Project: Sunqia/salome-med-1
def TEST_FuncConique():
    f = FuncConique(xlimit=0.3)
    from plotter import plot
    plot(f)
Code Example #44
0
    parser.add_argument("--monitor",  # reconstructed; option name inferred from args.monitor below
                       nargs=1,
                       action="store",
                       type=str,
                       metavar="path",
                       help="Monitor a folder for activity")
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(format="%(asctime)s %(levelname)s : %(message)s",
                            level=logging.INFO,
                            datefmt="%-H:%M:%S")

    if args.tmux:
        tmux_string()

    elif args.toggle:
        toggle()

    elif args.quiet:
        os.remove(data_file_path())

    elif args.report:
        day, week = unshelve()
        plot(day, week)

    elif args.monitor is not None:
        set_log_folder(args.monitor[0])

    else:
        print "Doing nothing..."
        print "Run './main.py --help' for help with options"
Code Example #45
0
File: functions.py Project: Sunqia/salome-med-1
def TEST_FuncStiffExp():
    f = FuncStiffExp(xlimit=0.3, stiffness=20.)
    from plotter import plot
    plot(f)
Code Example #46
0
File: main.py Project: dmitru/tridiagonal-solver
from optparse import OptionParser
import solver
import plotter
from sys import argv, exit
 
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option('-n', dest='n', default=10)
    parser.add_option('-o', '--output', dest='output_file', default='solution.txt')

    (options, args) = parser.parse_args()
    options.n = int(options.n)

    print('Start solving the model...')
    (A, b, x) = solver.to_linear_system(options.n)
    print(A.size, b.size, x.size)
    print(b)
    print(A)
    y = solver.solve_system(A, b)
    print(y)
    print(x)
    print('Done!')

    plotter.plot(y, x)

    # Write the computed solution to the output file
    with open(options.output_file, 'w') as f:
        f.write('\n'.join(str(v) for v in y))
Code Example #47
0
N_HIRINGS = 5_000
N_CANDIDATES = 20
CHOOSING_METHOD = 'max'
DISTRIBUTION = 'random'

random.seed(1)

# 1. Generate the simulated candidate pools

events = create_samples(N_HIRINGS, N_CANDIDATES, 'uniform')  # note: hard-coded instead of DISTRIBUTION
results = np.zeros((N_CANDIDATES - 1, 2))

# 2. Sweep the stopping point and score each resulting hire

for stopping_id in range(1, N_CANDIDATES):
    chosen = find_the_best(events,
                           stopping_id,
                           choosing_method=CHOOSING_METHOD)
    results[stopping_id - 1] = [stopping_id, evaluations_reward(chosen)]

# 3. Save the results and plot them

np.savetxt('results.dat', results)
plot(results,
     title="average score of the hired candidates vs. stopping point",
     xlabel="Stopping point",
     ylabel="(ave.) Score",
     xtics=1,
     filename="./results.png")
Code Example #48
0
File: runner.py Project: erikm0111/AIclass
		populationSize = populationSize,
		mutationProbability = mutationProbability,
		mutationScale = mutationScale, 
		numIterations = numIterations, 
		errorTreshold = errorTreshold)


	print_every = 100 # Print the output every this many iterations
	plot_every = 200 # Plot the actual vs estimated functions every this many iterations

	# emulated do-while loop
	done = False
	while not done: 

		done, iteration, best = GA.step()

		if iteration % print_every == 0: 
			print "Error at iteration %d = %f" % (iteration, errorClosure(best))

		#if iteration % plot_every == 0:
		#	NN.setWeights(best)
		#	plotter.plot(X_train, y_train, NN.output(X_train))
		#	plotter.plot_surface(X_train, y_train, NN)

	print "Training done, running on test set"
	NN.setWeights(best)

	print "Error on test set: ", NN.forwardStep(X_test, y_test)
	plotter.plot(X_test, y_test, NN.output(X_test))
	plotter.plot_surface(X_test, y_test, NN)
Code Example #49
0
File: nn_test.py Project: Piggelinus/Project
                with tf.variable_scope(str(dataset_number) + str(num_dataset_to_use) + str(num_hidden) + str(epoch)):

                    # Number of examples, number of input, dimension of each input
                    data = tf.placeholder(tf.float32, [None, chords_in_vector * chord_length])
                    target = tf.placeholder(tf.float32, [None, 1])

                    model = NNModel(data, target)

                    model.start_session()

                    batch_size = 1000
                    model.train(batch_size, epoch, reader.training_attributes, reader.training_labels)

                    incorrect = model.test(reader.testing_attributes, reader.testing_labels)

                    np.set_printoptions(precision=3, suppress=True)
                    print('Hidden Nodes {}, Epoch {:2d} accuracy {:4.2f}%'.format(num_hidden, epoch, 100 * (1 - incorrect)))
                    results[epoch_index].append(1 - incorrect)

                    model.end_session()
    print("")

# note: 'wb' is the Python 2 csv idiom; on Python 3 use 'w' with newline=''
with open('data/' + str(dataset_number) + '_bar/nn_results_epochs.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    writer.writerow(cell_names)
    for row in results:
        writer.writerow(row)

import plotter
plotter.plot()
Code Example #50
0
File: webserver_get.py Project: ashbeats/newscraper
    def save_plot(self):
        plot(url=self.url, name_clean=self.hash)
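
# For context, a minimal sketch of how a method like this might be hosted.
# Only plot(url=..., name_clean=...) comes from the snippet; the class, its
# name, and the md5 naming scheme are assumptions.
import hashlib

from plotter import plot


class ScrapedPage:  # hypothetical host class
    def __init__(self, url):
        self.url = url
        self.hash = hashlib.md5(url.encode("utf-8")).hexdigest()  # assumed id scheme

    def save_plot(self):
        plot(url=self.url, name_clean=self.hash)


ScrapedPage("https://example.com/article").save_plot()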
Code Example #51
0
File: classifier.py Project: name3anad/SVMDemo
print 'Training set size: ' + str(len(training_set))
print 'Test set size: ' + str(len(test_set))

i = 1000
sizes = []
training_acc = []
cv_acc = []
while i < len(training_set):
    t = training_set[:i]
    c, training, cv = cross_validation(t)
    sizes.append(i)
    training_acc.append(training)
    cv_acc.append(cv)
    i += 1000
    # print i
plot(sizes, ys=[training_acc, cv_acc], legs=['Training', 'CV'])
# Get accuracy
lr_classifier = cross_validation(training_set)[0]
lr_accuracy = classify.accuracy(lr_classifier, test_set)
# print "Classifier accuracy on test: " + str(lr_accuracy)
lr_accuracy_training = classify.accuracy(lr_classifier, training_set)
# print "Classifier accuracy on training: " + str(lr_accuracy_training)

diagnose(lr_classifier, test_set)

sentence = 'a'
while True:
    sentence = raw_input('Test me: ')
    if sentence == '':
        break
    features = feature_extractor(sentence)
Code Example #52
0
File: a4.py Project: RonanAlmeida/ComplexityGraph
def main():
    while True:  # main menu while loop
        # Do menu choices
        user_choice = menu.do_menu("Main Menu", ["Generate sort time files", "Plot average sort times"])
        if user_choice is None:
            break  # exit choice

        print('\nValid choice:', user_choice)

        if user_choice == 1:  # first menu choice - generate tests
            while True:  # Sub menu
                user_choice = menu.do_menu("Select a sort",
                                           ["Bubble sort",
                                            "Insertion Sort",
                                            "Optimized bubble sort",
                                            "Selection sort"])
                if user_choice is None:
                    break  # exit choice
                print('\nValid choice:', user_choice)

                if user_choice == 1:  # Generate test files for bubble sort
                    # Call the data test function to generate the csv file
                    print("\nGenerating test files for " + counting_quad_sorts.bubble_sort.__name__)
                    collect_function_performance_data.test_function(counting_quad_sorts.bubble_sort, MAX_N, NUM_TESTS)
                    print("\n" + counting_quad_sorts.bubble_sort.__name__ + ".csv generated")

                elif user_choice == 2:  # Generate test files for insertion sort
                    print("\nGenerating test files for " + counting_quad_sorts.insertion_sort.__name__)
                    collect_function_performance_data.test_function(counting_quad_sorts.insertion_sort, MAX_N,
                                                                    NUM_TESTS)
                    print("\n" + counting_quad_sorts.insertion_sort.__name__ + ".csv generated")

                elif user_choice == 3:  # Generate test files for optimized bubble sort
                    print("\nGenerating test files for " + counting_quad_sorts.opt_bubble_sort.__name__)
                    collect_function_performance_data.test_function(counting_quad_sorts.opt_bubble_sort, MAX_N,
                                                                    NUM_TESTS)
                    print("\n" + counting_quad_sorts.opt_bubble_sort.__name__ + ".csv generated")

                elif user_choice == 4:  # Generate test files for selection sort
                    print("\nGenerating test files for " + counting_quad_sorts.selection_sort.__name__)
                    collect_function_performance_data.test_function(counting_quad_sorts.selection_sort, MAX_N,
                                                                    NUM_TESTS)
                    print("\n" + counting_quad_sorts.selection_sort.__name__ + ".csv generated")

        elif user_choice == 2:  # 2nd menu choice plot average sort times
            # n num of choices
            while True:  # Sub menu
                file_path = file_chooser.get_file_path_and_name(pattern='*.csv')  # file_path is a list of the csv files
                if file_path is None:
                    break  # exit choice

                if file_path is not None:  # a file (or files) was selected
                    print('Path:', file_path[0])  # display its path
                    print('File:', file_path[1])
                    print('Both:', file_path[0] + "\\" + file_path[1])
                    print("\nCalculating Averages for " + file_path[1])

                    # Calculates the column averages for that particular csv file
                    col_avg = file_column_averages.get_file_column_averages(file_path[1])

                    print("\n Plotting Graph: " + file_path[1][:len(file_path[1]) - 4])

                    # Plotting the graph of the averages

                    # Setting up graph
                    plot_graph = plotter.plot(title=file_path[1][:-4],
                                              origin_x=15,
                                              origin_y=15,
                                              scale_x=6,
                                              scale_y=0.11,
                                              bg='darkseagreen1')

                    plot_graph['draw_axes'](tick_length=4, tick_interval_y=100)  # set up axes

                    # Plot each point
                    for x in range(len(col_avg)):
                        plot_graph['plot_point'](x, col_avg[x], 6, colour='red')

                    # the n^2/2 function, commented out
                    # plot_graph['plot_function'](lambda x: (x ** 2) / 2 if x >= 0 else None)
                    # plot_graph['put_text']('T(n) = n^2/2', x=70, y=150, size=12, colour='black')

                    # Labels T (100s), n, legend, t(n) = filename
                    plot_graph['put_text']('T\n(100s)', 2, 5500, size=9, colour='Black')
                    plot_graph['put_text']('n', 100, 100, size=9, colour='Black')
                    plot_graph['put_text']('Legend:', x=70, y=450, size=12, colour='blue')
                    plot_graph['put_text']('T(n) = ' + file_path[1][:-4], x=70, y=300, size=12,
                                           colour='red')

                    plot_graph['block']()  # Module exits when user closes the canvas window.
Code Example #53
0
def progress_print(digit):
    '''Does a simple console print of our progress'''
    print "Digit %11s %d"% (digit_count,dominant_digit()[0])
    #print digit_distribution_percentage(),dominant_digit()
    plot(digit_distribution_percentage(),digit_count,digit)
Code Example #54
0
File: main.py Project: ilanolkies/redneu-tp2
import sys
from os import path

from hebbiano import Hebbiano
from plotter import plot, plot_error
# load_dataset is assumed to be imported earlier in the original file

nombre_modelo = sys.argv[1]
dataPath = sys.argv[2]

if not path.exists(nombre_modelo + '.p'):
  data, labels = load_dataset(dataPath)
  modelo = Hebbiano(data)

  alg = sys.argv[3]
  if alg == 'oja':
    errors = modelo.train('oja', 9, 0.00001, 0.0001, 2000, 100) #0.001651126636278498
  elif alg == 'sanger':
    errors = modelo.train('sanger', 9, 0.00001, 0.001, 2000, 100) #0.04134266836522473

  modelo.save(nombre_modelo + '.p')
  
  plot_error(errors)
  plot(modelo.test(data), labels)
  
else:
  modelo = Hebbiano()
  modelo.load(nombre_modelo + '.p')

  data, labels = load_dataset(dataPath)

  y = modelo.test(data)

  plot(y, labels)
Code Example #55
0
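# assumes earlier in the original file: numpy as np, matplotlib.pyplot as plt,
# plotter as pt, and the fwdkin/dist helpers plus angles, finger, target, ix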
fig = plt.figure()
ax = plt.axes(projection="3d")
# Set rotation angle to 30 degrees
ax.view_init(azim=-90, elev=90)

h_point = fwdkin(angles[0], angles[1], angles[2], finger,
                 1)  # not constrained by the moment
point = np.array([h_point[4][0][3], h_point[4][1][3], h_point[4][2][3]])

print('Number of iterations: ' + str(ix))
print('Desired position: ' + str(target))
print('Final Position: ' + str(point))
print('Final Angles: ' + str(angles))
print('error in distance: ' + str(dist(target, angles, finger)))

pt.plot(h_point, ax, finger)

# Set Title and labels of plot
plt.xlabel('x [mm]')
plt.ylabel('y [mm]')
ax.set_zlabel('z [mm]')  # match the x/y units
plt.title('Hand Inverse Kinematics')

# Plot the obstacle plane y = -2
xx, zz = np.meshgrid(range(90), range(10))
yy = xx * 0 - 2
ax.plot_surface(xx, yy, zz - 2, alpha=0.6)

# Adjust dimensions of the plot
ax.set_xlim(-4, 90)
ax.set_ylim(-4, 90)
Code Example #56
0
keys = lines[0].split()[1:]  # column names from the header row

n_columns = len(keys)
n_rows = len(lines)

print(f"{n_rows}x{n_columns}")


def data_at(c, r):
    el_table = f"/html/body/form[1]/div[3]/div[1]/div[2]/div/div[2]/div/div/div[5]/div/div/table/tbody/tr[{3+r}]/td[{2+c}]"
    el_data = driver.find_element_by_xpath(el_table).text
    return float(el_data.replace(',', '')) if el_data else 0.0


data = [[data_at(c, r) for c in range(n_columns + 1)]
        for r in range(n_rows - 1)]

print(keys)
print(lines)
# print(data)

now = datetime.date.today()
_, week, day = now.isocalendar()
data = np.roll(data, 27 - week, axis=1)  # rotate columns so the current ISO week lands where the table expects it

# write to CSV
writer.dump_data(data)

# plot data
plotter.plot(data)
Code Example #57
0
File: runner.py Project: mttk/AIclass
		elitism = elitism,
		populationSize = populationSize,
		mutationProbability = mutationProbability,
		mutationScale = mutationScale, 
		numIterations = numIterations, 
		errorTreshold = errorTreshold)


	print_every = 100 # Print the output every this many iterations
	plot_every = 100 # Plot the actual vs estimated functions every this many iterations

	# emulated do-while loop
	done = False
	while not done: 
		done, iteration, best = GA.step()

		if iteration % print_every == 0: 
			print "Error at iteration %d = %f" % (iteration, errorClosure(best))

		if iteration % plot_every == 0: 
			NN.setWeights(best)
			plotter.plot(X_train, y_train, NN.output(X_train)) 
			plotter.plot_surface(X_train, y_train, NN)

	print "Training done, running on test set"
	NN.setWeights(best)

	print "Error on test set: ", NN.forwardStep(X_test, y_test)
	plotter.plot(X_test, y_test, NN.output(X_test))
	plotter.plot_surface(X_test, y_test, NN)