Example #1
def run_simulation(length_in_months, tax_rate):
    rate_after_tax = 1 - tax_rate

    income_entries = income.load_entries_from_file()
    expense_entries = expense.load_entries_from_file()

    # Pre-type the result table so later assignments keep stable dtypes.
    months = pd.DataFrame(
        columns=["month", "income", "expenses", "net", "cumulative_net"])
    months = months.astype({"month": int, "income": float, "expenses": float,
                            "net": float, "cumulative_net": float})

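    # Simulate month by month: tax the income, then record the net cash flow.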
    for month in range(1, length_in_months + 1):
        income_for_month = income.calculate_income_for_month(income_entries,
                                                             month)
        income_for_month *= rate_after_tax

        expenses_for_month = expense.calculate_expenses_for_month(
            expense_entries)

        # Append this month's row (pd.concat; DataFrame.append was removed in pandas 2.0).
        months = pd.concat([months, pd.DataFrame({
            "month": [month],
            "income": [income_for_month],
            "expenses": [expenses_for_month],
            "net": [income_for_month - expenses_for_month]
        })], ignore_index=True)

    months["cumulative_net"] = months["net"].cumsum()

    print(months)

    graphing.graph(months)
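
A minimal driver sketch, assuming the project-local income, expense, and graphing modules above are importable and their data files exist (the 240-month horizon and 25% flat rate are illustrative values, not from the source):

# Hypothetical usage: 20-year simulation at a 25% flat tax rate.
run_simulation(length_in_months=240, tax_rate=0.25)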
Example #2
    def add_result(self, result):
        # Graph the result, stage it locally, upload it, then remove local copies.
        graphed = graph(result)
        result['local_graphs'] = [graphed]
        del result['output']
        self.add_result_local(result)
        self.zipdir_local(result)
        self.add_result_cloud(result)
        self.rm_results_local(result)
Example #3
def creategraph(data, isStatic=True, filterType='sms', graph_directed=True, pid_dict=None):
    pid_dict = getuniqueparticipants(data, filterType) if pid_dict is None else pid_dict
    graph_obj = graph(is_directed=graph_directed, is_multigraph=filterType == 'all')
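    # Static mode builds one aggregate graph; dynamic mode exports weekly snapshots.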
    if isStatic:
        ignore_mtype = filterType != 'all'
        links, link_tuple = getlinks(pid_dict, data, ignore_mtype)
        graph_obj.addnodes(pid_dict[pr.participant[filterType]].values(), 'P')
        graph_obj.addnodes(pid_dict[pr.nparticipant[filterType]].values(), 'NP')
        graph_obj.addedges(link_tuple)
        return links, link_tuple, graph_obj, pid_dict
    else:
        start_datetime = dt.datetime.strptime(pr.start_datetime, '%Y-%m-%d %H:%M:%S')
        week_dict, link_tuple, week_content = getdynamiclinks(pid_dict,
                                                              data, start_datetime)
        to_write_edge, to_write_node = graph_obj.exportdynamicgraph(link_tuple, pid_dict)
        return to_write_edge, to_write_node, week_dict, pid_dict, week_content
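
A hedged usage sketch for the static path, assuming `data` is the message log loaded elsewhere in the project (the defaults shown in the signature give a static, directed SMS graph):

# Hypothetical call using the defaults from the signature above.
links, link_tuple, graph_obj, pid_dict = creategraph(data)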
Example #4
File: handler.py  Project: xtuml/geui
    def open_model(self):
        name = 'mygraph'  # eventually this will come from the user

        current_graph = graphing.graph()

        # Open a previously saved graph, if one exists; otherwise keep the
        # fresh graph created above.
        try:
            with open('data/' + name + '.xml', 'r') as saved_file:
                xml_string = saved_file.read()
            current_graph = gnosis.xml.pickle.loads(xml_string)

        except IOError:
            pass

        return current_graph
Example #5
File: cratchit.py  Project: w8s/cratchit
def generate_reports():
    print '\nGenerating reports for %d team members.' % len(s['members'])

    members = s['members']

    for member in members:
        print "\nReport for %s:" % member
        # get case resolves

        projects, project_cases = reporting.get_resolved_cases(s['config'], member, today, lastweek)

        resolves = len(project_cases)

        print 'Resolves: %d' % resolves

        activity_projects, activity_cases = reporting.get_case_activity(s['config'], member, today, lastweek)

        activity = len(activity_cases)

        print 'Activity: %d' % activity

        member_repo_list, changeset_list = reporting.get_commits(s['config'], member, today, lastweek)

        commits = 0
        for item in changeset_list:
            for key, value in item.iteritems():
                commits += len(value)

        print 'Commits: %d' % commits

        member.add_overview_data({'date' : datetime.now(),
                                  'resolves' : resolves,
                                  'activity' : activity,
                                  'commits'  : commits})

        save_dir = os.path.join(s['config']['home'], "reports", member.username)

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        graph_file = graphing.graph(member, save_dir)

        reporting.report(member, save_dir, graph_file, projects, project_cases, activity_projects, activity_cases, member_repo_list, changeset_list)

    s['members'] = members
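
The nested commit tally above can be collapsed into a single expression; a sketch, assuming each entry of changeset_list maps a repository to its list of changesets (Python 2, to match the iteritems() usage):

# Equivalent one-liner for the commit count.
commits = sum(len(changesets) for item in changeset_list
              for changesets in item.itervalues())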
Example #6
def calALL():
    
    path = opt.folders

    ps = []  # per-folder average PSNR values
    ss = []  # per-folder average SSIM values

    file_list = os.listdir(path)

    # Start a fresh summary file for the per-folder averages.
    f = open(path + "average.txt", 'w')
    f.write('average (PSNR,SSIM)\n')
    f.close()

    file_list = sorted(file_list, key=len)
    
    for Npath in file_list:
        
        newPath = path+Npath+'/'
        averPSNR = 0
        averSSIM = 0
        count = 0

        if not search(newPath):
            continue
        else:
            # Reset this folder's per-image metric logs.
            f = open(newPath + "PSNR.txt", 'w')
            f.close()

            f = open(newPath + "SSIM.txt", 'w')
            f.close()
            
            print("*folder* = "+newPath)
            
            newfile_list = os.listdir(newPath)

            list_PSNR = []
            list_SSIM = []
            
            for i in newfile_list:
             
                if i[-9:] == 'epoch.jpg':
                    os.remove(newPath + i)  # delete the matched epoch snapshot
                    continue
                
                if i[-3:] == 'txt' or i[-8:] == 'SSIM.jpg' or i[-8:] == 'PSNR.jpg':
                    continue
                
                img = i
                LR = newPath + img
                print('open '+ LR)
                
                HR = opt.HR
                HR = HR + img
                print('open '+HR)

                #biimg = cv2.imread(LR)
                #biimg = cv2.resize(biimg, None, fx=4, fy=4, interpolation=cv2.INTER_CUBIC)
                #cv2.imwrite(opt.HR+'../bicubic/'+i ,biimg)

                #LR = opt.HR+'../bicubic/'+i
                #print(LR)
                try:
                    one, two = PSNR.cal_PSNRandSSIM(HR, LR)
                except Exception:
                    # Skip pairs that fail to load or do not match in size.
                    print('skip')
                    continue
                print(f"{one} is PSNR, {two} is SSIM")
                
                f = open(newPath+"PSNR.txt", 'a')
                f.write(img+f" PSNR is {one}\n")
                f.close()
    
                f = open(newPath+"SSIM.txt", 'a')
                f.write(img+f" SSIM is {two}\n")
                f.close()
         
                list_PSNR.append(one)
                list_SSIM.append(two) 
                
                # Accumulate running totals for this folder's averages.
                averPSNR += one
                averSSIM += two
                
                count +=1 
                print('=============================')        
            
            graphing.graph(list_PSNR, [], newPath,'PSNR',opt.picofname)
            graphing.graph(list_SSIM, [], newPath, 'SSIM', opt.picofname)

            # Guard against empty folders before averaging.
            if count == 0:
                averPSNR = 0
                averSSIM = 0
            else:
                averPSNR /= count
                averSSIM /= count
            
            print("{} is average PSNR, {} is average SSIM".format(averPSNR,averSSIM))
            
            ps.append(averPSNR)
            ss.append(averSSIM)
            
            graphing.graph(ps, [], path,'PSNR',opt.X)
            graphing.graph(ss, [], path,'SSIM',opt.X)
            
            f = open(path+"average.txt", 'a')
            f.write("{} : {} is average PSNR, {} is average SSIM\n".format(Npath,averPSNR,averSSIM))
            f.close()
Example #7
				if p[2] > 0:
					eta = 9.7
				if p[2] < 0:
					eta = -9.7
			else:
				eta = -math.log(math.tan(pangle/2))
			eta_extra.append(eta)
	if i % 1000 == 0:
		print "DONE " + str(i)

print "count: " + str(count)

# Mass
f_m_tw = ROOT.TFile("reconstructed_top_mass_tw.root",'RECREATE')
h_m_tw = ROOT.TH1F("h",'Mass of tw Buckets',150,0,300)
graphing.graph(h_m_tw, f_m_tw, mass_tw, 'Mass (GeV)', 'Frequency', 'reconstructed_top_mass_tw')

f_m_t_ = ROOT.TFile("reconstructed_top_mass_t_.root",'RECREATE')
h_m_t_ = ROOT.TH1F("h",'Mass of t_ Buckets',150,0,300)
graphing.graph(h_m_t_, f_m_t_, mass_t_, 'Mass (GeV)', 'Frequency', 'reconstructed_top_mass_t_')

f_m_t0 = ROOT.TFile("reconstructed_top_mass_t0.root",'RECREATE')
h_m_t0 = ROOT.TH1F("h",'Mass of t0 Buckets',500,0,2000)
graphing.graph(h_m_t0, f_m_t0, mass_t0, 'Mass (GeV)', 'Frequency', 'reconstructed_top_mass_t0')

f_m_t0_z = ROOT.TFile("reconstructed_top_mass_t0_z.root",'RECREATE')
h_m_t0_z = ROOT.TH1F("h",'Mass of t0 Buckets (Zoomed in)',150,0,300)
graphing.graph(h_m_t0_z, f_m_t0_z, mass_t0, 'Mass (GeV)', 'Frequency', 'reconstructed_top_mass_t0_z')

f_m_x = ROOT.TFile("reconstructed_top_mass_x.root",'RECREATE')
h_m_x = ROOT.TH1F("h",'Mass of the Extra Buckets',110,-1,10)
graphing.graph(h_m_x, f_m_x, mass_extra, 'Mass (GeV)', 'Frequency', 'reconstructed_top_mass_x')
Example #8
def ae(x_data, y_data):

    x_train = np.array(x_data[:int(len(x_data) * TRAINING_PERCENTAGE)])
    y_train = np.array(y_data[:int(len(y_data) * TRAINING_PERCENTAGE)])

    x_test = np.array(x_data[int(len(x_data) * TRAINING_PERCENTAGE):])
    y_test = np.array(y_data[int(len(y_data) * TRAINING_PERCENTAGE):])

    x_train = flatten(x_train)
    x_test = flatten(x_test)

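    # Symmetric dense autoencoder with an 11-unit bottleneck (input -> 23 -> 11 -> 23 -> input).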
    autoencoder = Sequential()

    # Encoder Layers
    autoencoder.add(
        Dense(len(x_train[0]),
              input_dim=len(x_train[0]),
              activation=LeakyReLU(alpha=0.3)))
    autoencoder.add(Dense(23, activation=LeakyReLU(alpha=0.3)))
    autoencoder.add(Dense(11, activation=LeakyReLU(alpha=0.3)))

    # Decoder Layers
    autoencoder.add(Dense(11, activation=LeakyReLU(alpha=0.3)))
    autoencoder.add(Dense(23, activation=LeakyReLU(alpha=0.3)))
    autoencoder.add(Dense(len(x_train[0]), activation=LeakyReLU(alpha=0.3)))

    autoencoder.compile(optimizer='adam',
                        loss='mean_squared_error',
                        metrics=[crps])

    history = autoencoder.fit(x_train,
                              x_train,
                              epochs=100,
                              batch_size=128,
                              validation_data=(x_test, x_test),
                              verbose=2)

    graph(history, to_file='images/ae.png')

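    # Reuse the trained encoder layers to compress features for the downstream regressor.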
    inputs = Input(shape=(len(x_train[0]), ))
    encoder_layer1 = autoencoder.layers[0](inputs)
    encoder_layer2 = autoencoder.layers[1](encoder_layer1)
    encoder_layer3 = autoencoder.layers[2](encoder_layer2)
    encoder = Model(inputs, encoder_layer3)

    encoded_x_train = encoder.predict(x_train, verbose=2)
    print(len(encoded_x_train[0]))
    print(encoded_x_train.shape)
    encoded_x_test = encoder.predict(x_test, verbose=2)

    model = Sequential([
        Dense(45, input_dim=len(encoded_x_train[0])),
        LeakyReLU(alpha=0.3),
        Dense(22),
        LeakyReLU(alpha=0.3),
        Dense(1),
        LeakyReLU(alpha=0.3)
    ])

    model.compile(loss='mean_squared_error', optimizer='adam', metrics=[crps])

    history = model.fit(encoded_x_train,
                        y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        validation_data=(encoded_x_test, y_test))

    # graph(history, to_file='images/ae-ffnn.png')

    # Evaluate the model
    scores = model.evaluate(encoded_x_test, y_test)
    print(f'CRPS AE: {scores[1]}')
Example #9
            df_sim[comp_arr[a].name] = comp_arr[d].get_df_array()

    if run_t != 0:
        t_arr.append(run_t)
    run_t += dt


def get_vm_dict():
    vm_dict = {}
    for x in range(len(comp_arr)):
        vm_dict[comp_arr[x].name] = comp_arr[x].v_arr
    return vm_dict


vm_dict = get_vm_dict()
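# Plot membrane voltage against time for every compartment (from start_time onward).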
g1 = gr.graph(start_time=20)
fig1, ax1 = g1.graph_time_vm_allcomps(vm_dict, t_arr)
sns.despine()
fig1.show()
"""
fig_vm, (a1, a2, a3) = plt.subplots(1, 3,sharey=True)
plt.xlabel("Time (s)")
plt.ylabel("Voltage (mV)")
a1.set_title("Comp 1")
a2.set_title("Comp 2")
a3.set_title("Comp 3")

a1.plot(t_arr[45000:-1], comp_1.v_arr[45000:-1])
a2.plot(t_arr[45000:-1], comp_2.v_arr[45000:-1])
a3.plot(t_arr[45000:-1], comp_3.v_arr[45000:-1])
sns.despine()
"""
Example #10
    # quit function
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()

    # proximity detection (and transmission)
    for index, node in enumerate(myvars.nodes):
        # Slice from `index` so each pair is checked only once
        # (no [node1 --> node2] followed by [node2 --> node1]).
        for node2 in myvars.nodes[index:]:
            getDistance(node, node2, tick)
        if node.infected:
            node.checkImmunity(tick)
        node.move()

    drawWindow(myvars.nodes, myvars.deadNodes)

    # Periodically log data for the CSV export and graphs below.
    if tick % dataLogTicks == 0:
        logData()

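    # Stop the simulation once the infection has died out.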
    if myvars.infected == 0:
        break

    pygame.display.update()

logData()
createCSV(myvars.data)
graphing.graph(myvars.data)
Example #11
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 21:32:02 2018

@author: ASUS
"""

import dataclean
import graphing
import pandas as pd
import ml

fname = 'Dataset - Human Resource.csv'
threshold = 0.5  # results with probability above this threshold are labelled attrition = Yes
corrThreshold = 0.05  # drop columns whose correlation falls below this value

df = pd.read_csv(fname)
leandf, converteddf, cor, info, rawcorr = dataclean.dataCleanse(df)
graphs = graphing.graph(leandf, converteddf)
model, results = ml.train(converteddf, threshold)
Example #12
	def debited():
		print(colored("Starting debited graph","yellow"))
		pb = loading()
		graph("Debited")
		loaded(pb)
Example #13
	def credited():
		print(colored("Starting credited graph","yellow"))
		pb = loading()
		graph("Credited")
		loaded(pb)
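
Examples #12 and #13 differ only in the label passed to graph(); a parameterized sketch, assuming the same colored(), loading(), loaded(), and graph() helpers used above (run_graph and its label parameter are hypothetical names):

def run_graph(label):
	# Works for both the "Debited" and "Credited" graphs shown above.
	print(colored("Starting " + label.lower() + " graph", "yellow"))
	pb = loading()
	graph(label)
	loaded(pb)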
Example #14
import logging
import sys

import spreadsheet
from parse_arguments import parse_arguments
from read_responses import read_responses
from graphing import graph
from edurate_gensim import gensim_analysis

if __name__ == "__main__":
    print("Welcome to Edurate")
    print("https://github.com/Edurate/edurate")
    logging.info("Analyzes the Google form responses with gensim and " +
                 "returns the repeated words, graphs, or archives the file")
    EDU_ARGS = parse_arguments(sys.argv[1:])
    SPREADSHEET_LIST = spreadsheet.read_from_spreadsheet()
    DATA = spreadsheet.get_graph_data(SPREADSHEET_LIST)

    if EDU_ARGS.graph:
        print("Creating Graphs...")
        graph(DATA)

    spreadsheet.create_csv(SPREADSHEET_LIST)
    RESPONSES = read_responses(EDU_ARGS.file)
    RESPONSES = spreadsheet.filter_dates(RESPONSES)
    RESPONSES = spreadsheet.flip_responses(RESPONSES)

    QUESTION_NUMBER = 7
    for index, response in enumerate(RESPONSES[8:12]):
        gensim_analysis(response, QUESTION_NUMBER, EDU_ARGS.topics)
        QUESTION_NUMBER += 1
Example #15
                pt_extra.append(pt)
                if pt < 0.001:
                    if p[2] > 0:
                        eta = 9.7
                    if p[2] < 0:
                        eta = -9.7
                else:
                    eta = -math.log(math.tan(pangle / 2))
                eta_extra.append(eta)
        if i % 1000 == 0:
            print "DONE " + str(i)

# Mass
f_m_tw = ROOT.TFile("reconstructed_top_mass_tw.root", 'RECREATE')
h_m_tw = ROOT.TH1F("h", 'Mass of tw Buckets', 150, 0, 300)
graphing.graph(h_m_tw, f_m_tw, mass_tw, 'Mass (GeV)', 'Frequency',
               'reconstructed_top_mass_tw')

f_m_t_ = ROOT.TFile("reconstructed_top_mass_t_.root", 'RECREATE')
h_m_t_ = ROOT.TH1F("h", 'Mass of t_ Buckets', 150, 0, 300)
graphing.graph(h_m_t_, f_m_t_, mass_t_, 'Mass (GeV)', 'Frequency',
               'reconstructed_top_mass_t_')

f_m_t0 = ROOT.TFile("reconstructed_top_mass_t0.root", 'RECREATE')
h_m_t0 = ROOT.TH1F("h", 'Mass of t0 Buckets', 150, 0, 300)
graphing.graph(h_m_t0, f_m_t0, mass_t0, 'Mass (GeV)', 'Frequency',
               'reconstructed_top_mass_t0')

f_m_x = ROOT.TFile("reconstructed_top_mass_x.root", 'RECREATE')
h_m_x = ROOT.TH1F("h", 'Mass of the Extra Buckets', 110, -1, 10)
graphing.graph(h_m_x, f_m_x, mass_extra, 'Mass (GeV)', 'Frequency',
               'reconstructed_top_mass_x')