Example #1
	def handleRequest(self):
		Grapher,params = decodeGraph(self.getField('p'))
		if not Grapher:
			return apache.HTTP_INTERNAL_SERVER_ERROR

		# The original dict listed 'graph' twice (False, then True); only the
		# final True took effect, so the dead entry is dropped here.
		params.update({
			'db' : model.getDB(),
			'datapath' : '%s/' % getTempPath('data'),
			'imagepath' : '%s/' % getTempPath('images'),
			'logger' : logger,
			'maxage': 600,
			'hidetitle' : True,
			'graph' : True,
		})

		graph = Grapher(**params)

		filename = graph.getFullPathImagename()

		self.req.content_type = {
			'png' : 'image/png',
			'svg' : 'image/svg+xml',
			'eps' : 'application/postscript',
			'pdf' : 'application/pdf',
		}[graph.format.lower()]	

		if not os.path.exists(filename):
			raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
		self.req.sendfile(filename)

		return apache.OK
Example #2
def peak_tag_corr(df):
    #Preprocessing
    peak_score = pd.DataFrame(df['Peak Score'])
    tag_data = pd.DataFrame([df.iloc[:, -i] for i in range(1, 7)]).T
    data_to_plot = pd.concat([peak_score, tag_data], axis=1)
    log_data = np.log10(data_to_plot)

    #Instantiating graph variables
    figsize = (12, 6)
    fig_name = 'Peak score for each motif tag count with log scaling'
    axis_names = [
        'Peak Score', 'CCGTCC tag count (in bp)', 'ATGTCA tag count (in bp)',
        'AGTTCC tag count (in bp)', 'AGTCAA tag count (in bp)',
        'GTCCGC tag count (in bp)', 'GTAGAG tag count (in bp)'
    ]
    cols_to_graph = list(range(7))

    #Create Grapher object and make a big scatter
    grapher = Grapher(log_data, figsize)
    #grapher = Grapher(data_to_plot, figsize)
    grapher.big_scatter(cols_to_graph,
                        shape=(2, 3),
                        fixed_y=0,
                        axis_names=axis_names,
                        fig_name=fig_name)
    plt.show()
    grapher.fig.savefig(out_dir, bbox_inches="tight", dpi=600)
Example #3
def player(playerID):

	player = Player(playerID)
	shotChart = player.getShotData()
	headers = shotChart['resultSets'][0]['headers']
	shots = shotChart['resultSets'][0]['rowSet']

	# print(shots)
	# print('---')

	grapher = Grapher(shots)
	x,y,shotStatus = grapher.getShotCoordinates()

	# split shot coordinates into made and miss
	madeX = []
	madeY = []
	missedX = []
	missedY = []
	for i in range(len(x)):
		if shotStatus[i] == 'Made Shot':
			madeX.append(x[i])
			madeY.append(y[i])
		else:
			missedX.append(x[i])
			missedY.append(y[i])

	file = grapher.makeGraph(x,y, madeX, madeY, missedX, missedY)

	return render_template('player.html', playerID=playerID, shotChart=file)
Example #4
    def __init__(self, user_id):
        self.user_id = user_id
        self.profile_url = self.profile_url + user_id
        self.post_url = self.post_url + user_id
        self.csv_file = 'csv_' + user_id + '.csv'

        self.start_scraping()
        graph = Grapher(self.csv_file)
        graph.plot()
Example #5
def main():
    delta_days = (dt.date.today() -
                  (dt.date.today() - relativedelta(months=1))).days
    mizore = Grapher(delta_days)
    mizore.make_graph_of_counts_per_daily("monthly_graph.png")
    mizore.make_glaph_of_counts_per_hourly("monthly_graph_per_hourly.png")
    mizore.post(("monthly_graph.png", "monthly_graph_per_hourly.png"))
Example #6
File: app.py  Project: Wutus/Relativistic
 def run(self, pos1, vel1, pos2, vel2):
     self.screen = pygame.display.set_mode(self.size)
     self.coord_converter = CoordConverter((-10, 10), (-10, 10),
                                           (0, self.screen.get_size()[0]),
                                           (0, self.screen.get_size()[1]))
     pygame.display.set_caption('Relativistic')
     self.engine = Engine(pos1, vel1, pos2, vel2)
     self.time_multiplier = 1.0
     self._drawer = Drawer(self.screen, self.coord_converter)
     self._grapher = Grapher(self.screen, self.coord_converter)
     #self.drawer = Drawer(self.screen, self.coord_converter)
     self.drawer = self._drawer
     self.clock = pygame.time.Clock()
     self.clock.tick(60)
     self.running = True
     while self.running:
         for event in pygame.event.get():
             if event.type == pygame.QUIT:
                 self.running = False
             else:
                 self.interface.handle_event(event)
         self.clock.tick(60)
         if self.drawer is self._drawer:
             self.engine.adjust_bodies()
         self.engine.update(self.clock.get_time() / 1000 *
                            self.time_multiplier)
         self.draw()
Example #7
def main():

    # parameters: the filename of your `top` output file and the directory where it is saved
    tp = TopParser(filename=str(sys.argv[1]), dir_text=str(sys.argv[2])) 

    text = tp.parseText()
    data = tp.fillDict(text)
    summary = tp.getSummaryData(data)

    # parameters (besides the data and summary arguments):
    #       - the filename you want for the xlsx file,
    #       - the directory where the xlsx file should be saved
    g = Grapher(data=data, summary=summary, filename=str(sys.argv[1]), dir_xlsx=str(sys.argv[2]))

    df1, df_np = g.convertToNumpy()
    g.createPlot(df_np)
    g.saveAsXLSX(df1)
Example #8
def testMeta():
    
    data = Data()
    meta = MetaNet(input_vector_size=28 * 28, subnet_output_vector_size=10)
    grapher = Grapher()

    #TODO: Actually use this for something
    subNetList = []

    print("Phase 1: Train Subnet on Numbers")
    accuracy = []
    for datum in data.sub_tr:
        (img, label) = datum
        if (meta.trainSubNet(img, label) == label):
            accuracy.append(1)
        else:
            accuracy.append(0)
    grapher.addGraph(accuracy, "SubNet Train Accuracy")

    print("Phase 2: Train AlterNet on Letters")
    accuracy = []
    for datum in data.alter_tr:
        (img, label) = datum
        if (meta.trainAlterNet(img, label) == label):
            accuracy.append(1)
        else:
            accuracy.append(0)
    grapher.addGraph(accuracy, "AlterNet Train Accuracy")

    print("Phase 3: Train SuperNet")
    accuracy = []
    for datum in data.super_tr:
        (img, label, super_label) = datum
        if (meta.trainSuperNet(img, label, super_label) == super_label):
            accuracy.append(1)
        else:
            accuracy.append(0)
    grapher.addGraph(accuracy, "SuperNet Train Accuracy")

    print("Phase 4: Generate Child Network")
    (child, accuracy) = meta.generateChild(data.child_tr)
    grapher.addGraph(accuracy, "Child Generation Accuracy")

    print("Phase 5: Test Child Network")
    accuracy = []
    for datum in data.child_te:
        (img, label) = datum
        if (child.run(img) == label):
            accuracy.append(1)
        else:
            accuracy.append(0)
    grapher.addGraph(accuracy, "Child Accuracy")

    grapher.graphAll()
Example #9
        parsed = json.load(json_open)
        refererlist = []

        for page in parsed['log']['entries']:
            # print(page['request']['method'] + ": " + page['request']['url'])

            for header in page['request']['headers']:
                if header['name'] == 'Referer':
                    r = Referer(header['value'])

                    # insert referer if is not in list
                    if r not in refererlist:
                        ralt = str(header['value']).split("/")[2]
                        r.altname = ralt
                        refererlist.append(r)

                    for r in refererlist:
                        if r.referer == header['value']:
                            spliturl = str(page['request']['url']).split("/")
                            tup = (spliturl[2], spliturl[0].replace(":", ""))
                            r.appendget(tup)

        for r in refererlist:
            print(r.referer + ",  " + r.altname)

        return refererlist


ha = Haranalyzer()
refs = ha.referer(sys.argv[1])
g = Grapher()
g.referergraph(refs, "/home/snake/PycharmProjects/project2")
Example #10
    def extract(raw_txt, logger):

        c = Cleaner()
        cleaned_text_list = c.clean(raw_txt)

        logger.info('Done cleaning')
        logger.debug(len(cleaned_text_list))
        logger.debug(cleaned_text_list)

        matrix_builder = MatrixBuilder()
        matrix = matrix_builder.build_sim_matrix(cleaned_text_list, logger)

        logger.info('Done building sim matrix')
        logger.debug('Dimensions: {}'.format(matrix.shape))
        logger.debug(matrix)

        g = Grapher()
        pageranks = g.graph(matrix)

        logger.info('Generated graph and got pageranks')
        logger.debug(pageranks)

        # Shorter documents keep a larger fraction of their sentences. Plain
        # comparisons are used here; the original range() membership tests
        # left sizes 300, 800 and 1500 falling through to the else branch.
        total_doc_size = len(cleaned_text_list)
        if total_doc_size < 300:
            summary_length = int(0.4 * total_doc_size)
        elif total_doc_size < 800:
            summary_length = int(0.2 * total_doc_size)
        elif total_doc_size < 1500:
            summary_length = int(0.1 * total_doc_size)
        else:
            summary_length = int(0.05 * total_doc_size)

        top_ranked = nlargest(summary_length, pageranks, key=pageranks.get)
        top_ranked.sort()

        cl = Cluster()
        top_ranked = cl.splitIntoParagraph(top_ranked, 7.5)

        logger.debug(top_ranked)
        result = ''
        for paragraph in top_ranked:
            for key in paragraph:
                top_ranked_sentence = cleaned_text_list[key]
                result += '{}. '.format(top_ranked_sentence)
            result += '\n\n'

        # Locals are released when the function returns anyway; these dels just
        # free the large intermediates a little earlier.
        del c, cleaned_text_list, matrix_builder, matrix, g
        del pageranks, total_doc_size, summary_length, top_ranked, cl, raw_txt

        return result
Example #11
File: main.py  Project: platers/asset-sim
                                               step=1.0,
                                               min_value=0.0)
    assumptions.RRA = st.number_input('RRA',
                                      value=2.0,
                                      step=1.0,
                                      min_value=0.1)

# re initialize strategies based on user defined parameters
for i in range(len(strategies)):
    s = strategies[i].__class__(assumptions)
    strategies[i] = s

sim = Simulator()
df = sim.simulate(assumptions, strategies, runs=400)

gr = Grapher()
chart = gr.graph(df)
st.altair_chart(chart)
st.title('Asset Sim')

st.markdown('''
    Asset Sim is a tool to visualize long-term investing strategies. Quickly simulate different strategies and market assumptions to see how they affect your finances.
    ## FAQ
    ### How is the graph generated?
    A Monte Carlo simulation generates many runs under the given assumptions and aggregates them.
    The lines are the median amounts of assets at each point in time. The error bands show the first and third quartiles.

    ### Why median?
    Medians are less sensitive to outliers than means. A few lucky runs can blow up a mean.

    ### Where do the default values come from?
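The FAQ above describes the aggregation step in prose. As a rough, hypothetical sketch (not the project's actual code), assuming each simulated run is one column of a DataFrame indexed by time step, the median line and quartile bands could be computed like this:

import numpy as np
import pandas as pd

# Hypothetical stand-in for simulator output: 120 time steps x 400 runs.
rng = np.random.default_rng(0)
runs = pd.DataFrame(rng.lognormal(mean=0.005, sigma=0.05, size=(120, 400)).cumprod(axis=0))

summary = pd.DataFrame({
    'median': runs.median(axis=1),      # the plotted line
    'q1': runs.quantile(0.25, axis=1),  # lower edge of the error band
    'q3': runs.quantile(0.75, axis=1),  # upper edge of the error band
})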
Example #12
from grapher import Grapher

print("Here are some sample plots for you.")
g = Grapher([])
g.examples()
Example #13
from grapher import Grapher
c = Grapher('conv.json')

print('here')
Example #14
    stock_name = 'amzn'
    stock = pd.read_csv('data/' + stock_name + '.us.csv')

    span = 300
    window = 100
    proc = Processor(stock, window=window)
    x_test, y_test = proc.run(span=span, start=stock.shape[0] - 3 * span - 1)

    input_dim = proc.window
    models = {'NN24x6': NN24(input_dim)}
    # modes = {'buy-sell': ['BUY', 'HOLD', 'SELL'], 'long-short': ['LONG', 'SHORT']}
    modes = {'long-short': ['LONG', 'SHORT']}

    for name, model in models.items():
        title = stock_name.upper() + ' ' + name
        grapher = Grapher(title)
        with open(title.lower().replace(' ', '_') + '.txt', 'w') as f:
            for e in range(EPISODES + 1):
                x_train, y_train = proc.run(span=span)

                model.model.fit(x_train, y_train)
                y_pred = model.model.predict(x_test)

                prev_prices = x_test[:, -1]
                prices = y_test
                pred = y_pred.reshape(y_pred.shape[0])
                if e % 100 == 0:
                    for mode, action_labels in modes.items():
                        trader = Trader(mode)
                        strategy = trader.to_strategy(prev_prices, pred)
                        optimal = trader.to_strategy(prev_prices, prices)
Example #15
        elif is_line_in_edge(boarded_line, cur_edge.source(),
                             cur_edge.target()):
            route[counter]._line = cur_line
            # print("Line Corrected")
            correct_route(route, boarded_line, remaining_route, counter)
        else:
            # print("Changed Bus")
            boarded_line = cur_edge.line()
            correct_route(route, boarded_line, remaining_route, counter)

    e = Exporter()
    db_edges = e.get_edges()
    db_vertices = e.get_vertices()
    # Initialize graph and populate it
    g = Grapher()
    g.populate_vertices(db_vertices)
    g.populate_edges(db_edges)
    g.populate_graph()
    d = Dijkstra(g.graph)
    i = Isochroner(g, 3)
    route = d.min_path(444, 728)
    route = route[1]
    geoms_to_lines = i.geoms_to_lines
    init_route = route
    init_line = route[0].line()
    init_size = len(route)
    cur_line = init_line
    counter = 0
    corrected_route = correct_route(route, 'A7', route, 0)
Example #16
    yaw_vel = (ps4.axis_raw[2] - 128) / 128.0
    pitch_vel = ((255 - ps4.axis_raw[3]) - 128) / 128.0
    ud_normal = cross(cross(arm.vectors[1], arm.vectors[2]), arm.vectors[2])
    rl_normal = cross(arm.vectors[2], [0, 0, 1])
    pitch_normal = cross(arm.vectors[2], arm.vectors[1])
    nav.up_down([0, 0, 1], up_down_vel)
    nav.rigth_left(rl_normal, rl_vel)
    nav.zoom(zoom_vel)
    nav.rotate_yaw(ud_normal, yaw_vel)
    nav.rotate_pitch(pitch_normal, pitch_vel)
    # inp = int(input("zoom?"))
    # nav.zoom(inp)
    p = nav.position
    s = nav.vector


    arm.update_destination_point(p, s)
    lines = to_lines(arm.joint_points)
    # g.redraw_point(arm.joint_points[1])
    g.redraw(lines)


g = Grapher(lines)
t_point1 = [50, 10, 40]
t_point2 = [70, 5, 30]
g.redraw_point(t_point1)
g.redraw_point(t_point2)
g.redraw(lines)
g.show(anim)
Example #17
                             span=span,
                             start=stock.shape[0] - 2 * span - 1)
        env = Longshort(stock_name, window=window, span=span)
        state_size = env.observation_space.shape[0]
        action_size = env.action_space.n

        replay_agent = DQNAgent(state_size, action_size)  # model_replay = None
        agent = DQNAgent(state_size, action_size, model_replay=replay_agent)
        save_string = './save/' + stock_name + '_weights_without_fees_test.h5'
        # agent.load(save_string)
        done = False
        batch_size = 32

        title = env.symbol.upper() + ' MDP Replay ' + os.path.basename(
            __file__).split('.')[0]
        grapher = Grapher(title)

        with open('./save/losses_' + stock_name + '.txt', 'w') as f:
            for e in range(EPISODES + 1):
                # Train
                state = env.reset()
                state = np.reshape(state, [1, state_size])
                for time in range(500):
                    cash, nown, price = env.holdings[0], env.holdings[1], env.state[-1]
                    # env.render()
                    action = agent.act(state, time)
                    next_state, reward, done, _ = env.step(action)
                    next_state = np.reshape(next_state, [1, state_size])
                    agent.remember(state, action, reward, next_state, done)
                    # agent.train(state, action, reward, next_state, done)
Example #18
def main():
    delta_days = 7
    mizore = Grapher(delta_days)
    mizore.make_graph_of_counts_per_daily("weekly_graph.png")
    mizore.make_glaph_of_counts_per_hourly("weekly_graph_per_hourly.png")
    mizore.post(("weekly_graph.png", "weekly_graph_per_hourly.png"))
Example #19
                    key for key, val in modelData.items()
                    if isinstance(val, UnknownVariable)
                ]
            else:
                task = [key for key in utils.getOutputs() if key in modelData]

            #LET'S SOLVE
            if engineType not in graphed or True:  # 'or True' forces graphing for every engine (debug override in the source)
                graph = args.graph
                graphed.append(engineType)
            else:
                graph = False

            grph = Grapher(analysisName,
                           graph=graph,
                           view=(not args.noshow),
                           debug=args.debug,
                           cool=args.cool,
                           labels=args.labels)

            slvr = Solver(modelData, task, grph, logger)
            utils.tic()
            try:
                if slvr.validateTree():
                    logger.debug("Tree building took %s seconds" %
                                 (utils.toc()))
                    logger.debug("Solving tree...")
                    utils.tic()
                    res = slvr.solve()
                    logger.debug("Done! Solving took %s seconds." %
                                 (utils.toc()))
                    logger.info("Here are your outputs:\n" + utils.formatData(
Example #20
    def fit(self, *args, **kwargs):

        MEM_SZ = MEM_SIZE_FCL

        sess = K.get_session()
        K.set_learning_phase(1)

        self.actor = ActorNetwork(sess,
                                  self.state_dim,
                                  self.nn_action_dim,
                                  BATCH_SIZE,
                                  TAU,
                                  LRA,
                                  convolutional=CONVOLUTIONAL,
                                  output_activation=ACTION_ACTIVATION)
        self.critic = CriticNetwork(sess,
                                    self.state_dim,
                                    self.nn_action_dim,
                                    BATCH_SIZE,
                                    TAU,
                                    LRC,
                                    convolutional=CONVOLUTIONAL)

        self.memory = Memory(MEM_SZ)

        self.actor.target_model.summary()
        self.critic.target_model.summary()

        if LOAD_WEIGHTS:
            self.actor.model.load_weights(LOAD_WEIGHTS_PREFIX +
                                          "actor_model_" +
                                          LOAD_WEIGHTS_EPISODE + ".h5")
            self.critic.model.load_weights(LOAD_WEIGHTS_PREFIX +
                                           "critic_model_" +
                                           LOAD_WEIGHTS_EPISODE + ".h5")
            self.actor.target_model.load_weights(LOAD_WEIGHTS_PREFIX +
                                                 "actor_target_model_" +
                                                 LOAD_WEIGHTS_EPISODE + ".h5")
            self.critic.target_model.load_weights(LOAD_WEIGHTS_PREFIX +
                                                  "critic_target_model_" +
                                                  LOAD_WEIGHTS_EPISODE + ".h5")
            print("Weights Loaded!")

        #====================================================
        #Initialize noise processes
        #self.noise_procs = []
        #for i in range(NUM_NOISE_PROCS):
        #    self.noise_procs.append(OUProcess(OU_MEAN, OU_THETA, OU_STD_DEV))

        #====================================================

        PRE_LEARNING_EPISODES = STARTING_EPISODE + PRE_LEARNING_EPS
        steps = STARTING_EPISODE * EPISODE_LENGTH
        start_time = time.time()
        last_ep_time = time.time()
        if MAKE_PLOT:
            reward_graph = Grapher()

        for ep in range(STARTING_EPISODE, EPISODES):

            #reset noise processes
            #for ou in self.noise_procs:
            #    ou.reset()

            self.noise.reset()

            #start time counter
            if (ep == PRE_LEARNING_EPISODES):
                start_time = time.time()

            print("Episode: " + str(ep) + "  Frames: " +
                  str(ep * EPISODE_LENGTH) + "  Uptime: " + str(
                      (time.time() - start_time) / 3600.0) +
                  " hrs    ===========")

            state = self.env.reset()

            play_only = (ep % 10 == 0)

            total_reward = 0

            if play_only or ALREADY_TRAINED:
                for step in range(TEST_EPISODE_LENGTH):

                    #print(">>>>>>>>>>>>>", state.shape)
                    #img = np.array([np.subtract(img, 128)], dtype=np.float32) #zero center
                    #img = np.multiply(img, 1.0/128.0) #scale [-1,1]
                    #img = np.transpose(state, (1,2,0))

                    #img = np.array(state)
                    #img = np.transpose(img, (1,2,0))

                    #print(">>>>>>>>>>>>>", state.shape)

                    state = np.reshape(state, state.shape + (1, ))

                    action, control_action = self.selectAction(
                        state, can_be_random=False, use_target=True)

                    nstate, reward, done, info = self.env.step(control_action)
                    total_reward += reward
                    state = nstate
            else:
                for step in range(EPISODE_LENGTH):

                    # ACT ==============================
                    epsilon = (float(steps) / float(EPSILON_STEPS)) * (
                        EPSILON_RANGE[1] - EPSILON_RANGE[0]) + EPSILON_RANGE[0]

                    state = np.reshape(state, state.shape + (1, ))

                    action, control_action = self.selectAction(state,
                                                               epsilon=epsilon)
                    new_state, reward, done, info = self.env.step(
                        control_action)
                    done = done or (step >= EPISODE_LENGTH - 1)  # flag the final step as terminal
                    self.memory.addMemory(state, action, reward, new_state,
                                          done)
                    state = new_state

                    # LEARN ============================
                    if ep > PRE_LEARNING_EPISODES:
                        batch, idxs = self.memory.getMiniBatch(BATCH_SIZE)
                        self.learnFromBatch(batch)

                    if done:
                        break
                    # CLEANUP ==========================
                    steps += 1

            #we need to consider the episodes without noise to actually tell how the system is doing
            if play_only and MAKE_PLOT:
                reward_graph.addSample(total_reward)
                reward_graph.displayPlot()

            #calculate fph on total frames
            total_frames = (ep - PRE_LEARNING_EPISODES) * EPISODE_LENGTH
            elapsed = time.time() - start_time
            fps = total_frames / elapsed
            fph = fps * 3600.0

            #re-calculate fps on this episode, so it updates quickly
            fps = EPISODE_LENGTH / (time.time() - last_ep_time)
            last_ep_time = time.time()
            print("fps: " + str(fps) + "  fph: " + str(fph) + "\n")

            #save plot and weights
            if (ep > 0 and ep % EPISODE_SAVE_FREQUENCY
                    == 0) and not ALREADY_TRAINED:

                #plot
                if MAKE_PLOT:
                    reward_graph.savePlot(SAVE_WEIGHTS_PREFIX + "graph_" +
                                          str(ep) + ".jpg")

                #weights
                self.actor.model.save_weights(SAVE_WEIGHTS_PREFIX +
                                              "actor_model_" + str(ep) + ".h5",
                                              overwrite=True)
                self.actor.target_model.save_weights(
                    SAVE_WEIGHTS_PREFIX + "actor_target_model_" + str(ep) +
                    ".h5",
                    overwrite=True)
                self.critic.model.save_weights(
                    SAVE_WEIGHTS_PREFIX + "critic_model_" + str(ep) + ".h5",
                    overwrite=True)
                self.critic.target_model.save_weights(
                    SAVE_WEIGHTS_PREFIX + "critic_target_model_" + str(ep) +
                    ".h5",
                    overwrite=True)

                #network structures (although I don't think I ever actually use these)
                with open(
                        SAVE_WEIGHTS_PREFIX + "actor_model_" + str(ep) +
                        ".json", "w") as outfile:
                    json.dump(self.actor.model.to_json(), outfile)
                with open(
                        SAVE_WEIGHTS_PREFIX + "actor_target_model_" + str(ep) +
                        ".json", "w") as outfile:
                    json.dump(self.actor.target_model.to_json(), outfile)
                with open(
                        SAVE_WEIGHTS_PREFIX + "critic_model_" + str(ep) +
                        ".json", "w") as outfile:
                    json.dump(self.critic.model.to_json(), outfile)
                with open(
                        SAVE_WEIGHTS_PREFIX + "critic_target_model_" +
                        str(ep) + ".json", "w") as outfile:
                    json.dump(self.critic.target_model.to_json(), outfile)
Example #21
 def graph(self):
     self.grapher = Grapher(self.df, self.tags)
     self.grapher.average_rating()
     self.grapher.duration()
     self.grapher.views()
     self.grapher.gen_tags_plot()
Example #22
class Analysis():
    """Main class responsible for downloading and analyzing data.
    
    Parameters
    ----------
    path : str (default='data')
        The path to the directory where both raw and computed results should be stored.
        
    Attributes
    ----------
    raw : str
        Path to 'raw' directory in self.path directory
    ran : str
        Path to 'ran' directory in self.path directory
    df : Dataframe
        Pandas Dataframe used to store compiled results
    tags : [[str]]
        A list of tags for each downloaded video
    grapher : Grapher
        Creates the interactive graphs portion of the analysis
        
    seconds : int
        The sum of video durations
    formatted_time : str
        Seconds converted to W/D/H/M/S format
    all_likes : Series
        Video that has the most likes without a single dislike
    most_liked : Series
        Video with the most total likes
    most_viewed : Series
        Video with the most total views
    oldest_videos : Dataframe
        First 10 videos watched on user's account.
    oldest_upload : Series
        Video with the oldest upload date to youtube.
    HD : int
        The number of videos that have high-definition resolution
    UHD : int
        The number of videos that have ultra-high-definition resolution
    top_uploaders : Series
        The most watched channel names with corresponding video counts
    funny_counts : int
        The max number of times a video's description says the word 'funny'
    funny : Series
        The 'funniest' video as determined by funny_counts
    """
    def __init__(self, path='data'):
        self.path = path
        self.raw = os.path.join(self.path, 'raw')
        self.ran = os.path.join(self.path, 'ran')
        self.df = None
        self.tags = None
        self.grapher = None

        self.seconds = None
        self.formatted_time = None
        self.all_likes = None
        self.most_liked = None
        self.most_viewed = None
        self.oldest_videos = None
        self.oldest_upload = None
        self.HD = None
        self.UHD = None
        self.top_uploaders = None
        self.funny = None
        self.funny_counts = None

    def download_data(self):
        """Uses youtube_dl to download individual json files for each video."""
        print('There\'s no data in this folder. Let\'s download some.')
        successful_login = False
        while not successful_login:
            successful_login = True
            user = input('Google username: ')
            pw = getpass.getpass('Google password: ')  # getpass assumed imported at module level
            files = os.path.join(self.raw, '%(autonumber)s')
            if not os.path.exists(self.raw):
                os.makedirs(self.raw)
            cmd = ('youtube-dl -u "{}" -p "{}" ' + '-o "{}" ' +
                   '--skip-download --write-info-json -i ' +
                   'https://www.youtube.com/feed/history ').format(
                       user, pw, files)
            p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
            while True:
                line = p.stdout.readline().decode("utf-8").strip()
                print(line)
                if line == 'WARNING: unable to log in: bad username or password':
                    successful_login = False
                if not line: break

    def df_from_files(self):
        """Constructs a Dataframe from the downloaded json files.
        
        All json keys whose values are not lists are compiled into the dataframe.
        The dataframe is then saved as a csv file in the self.ran directory.
        The tags of each video are pickled and saved as tags.txt
        """
        print('Creating dataframe...')
        num = len(
            [name for name in os.listdir(self.raw) if not name[0] == '.'])
        files = os.path.join(self.raw, '~.info.json')  # This is a weird hack
        files = files.replace(
            '~', '{:05d}')  # It allows path joining to work on Windows
        data = [json.load(open(files.format(i))) for i in range(1, num + 1)]

        columns = ['formats', 'tags', 'categories', 'thumbnails']
        lists = [[], [], [], []]
        deletes = {k: v for k, v in zip(columns, lists)}
        for dt in data:
            for col, ls in deletes.items():
                ls.append(dt[col])
                del dt[col]

        self.df = pd.DataFrame(data)
        self.df['upload_date'] = pd.to_datetime(self.df['upload_date'],
                                                format='%Y%m%d')
        self.df.to_csv(os.path.join(self.ran, 'df.csv'))

        self.tags = deletes['tags']
        pickle.dump(self.tags, open(os.path.join(self.ran, 'tags.txt'), 'wb'))

    def make_wordcloud(self):
        """Generate the wordcloud file and save it to static/images/."""
        #plt.rcParams['figure.figsize'] = [24.0, 18.0]
        print('Creating wordcloud')
        flat_tags = [item for sublist in self.tags for item in sublist]
        wordcloud = WordCloud(width=1920, height=1080, relative_scaling=.5)
        wordcloud.generate(' '.join(flat_tags))
        wordcloud.to_file(os.path.join('static', 'images', 'wordcloud.png'))

    def check_df(self):
        """Create the dataframe and tags from files if file doesn't exist."""
        if not os.path.exists(self.ran):
            os.makedirs(self.ran)
        df_file = os.path.join(self.ran, 'df.csv')
        if os.path.isfile(df_file):
            self.df = pd.read_csv(df_file, index_col=0, parse_dates=[-11])
            self.tags = pickle.load(
                open(os.path.join(self.ran, 'tags.txt'), 'rb'))
            self.df['upload_date'] = pd.to_datetime(self.df['upload_date'])
        else:
            self.df_from_files()

    def total_time(self):
        """The amount of time spent watching videos."""
        self.seconds = self.df.duration.sum()
        seconds = self.seconds
        intervals = (
            ('weeks', 604800),  # 60 * 60 * 24 * 7
            ('days', 86400),  # 60 * 60 * 24
            ('hours', 3600),  # 60 * 60
            ('minutes', 60),
            ('seconds', 1))

        result = []

        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip('s')
                result.append("{} {}".format(value, name))
        self.formatted_time = ', '.join(result)

    def worst_videos(self):
        """Finds the lowest rated and most disliked videos"""
        df_liked = self.df[self.df.like_count > 0]
        self.lowest_rating = df_liked.loc[df_liked['average_rating'].idxmin()]
        self.most_disliked = self.df.loc[self.df['dislike_count'].idxmax()]

    def best_videos(self):
        """Finds well liked and highly viewed videos"""
        all_likes = self.df[self.df.average_rating == 5]
        all_likes = all_likes.sort_values('like_count', ascending=False)
        self.all_likes = all_likes.iloc[0]

        self.most_liked = self.df.loc[self.df['like_count'].idxmax()]
        self.most_viewed = self.df.loc[self.df['view_count'].idxmax()]

    def funniest_description(self):
        """Counts number of times 'funny' is in each description. Saves top result."""
        funny_counts = []
        descriptions = []
        index = []
        for i, d in enumerate(self.df.description):
            try:
                funny_counts.append(d.lower().count('funny'))
                descriptions.append(d)
                index.append(i)
            except AttributeError:
                pass
        funny_counts = np.array(funny_counts)
        funny_counts_idx = funny_counts.argmax()
        self.funny_counts = funny_counts[funny_counts_idx]
        if self.funny_counts > 0:
            self.funny = self.df.iloc[index[funny_counts_idx]]
        else:
            self.funny = 'Wait, 0? You\'re too cool to watch funny videos on youtube?'

    def three_randoms(self):
        """Finds results for video resolutions, most popular channels, and funniest video."""
        self.HD = self.df[(720 <= self.df.height)
                          & (self.df.height <= 1080)].shape[0]
        self.UHD = self.df[self.df.height > 1080].shape[0]
        self.top_uploaders = self.df.uploader.value_counts().head(n=15)
        self.funniest_description()

    def compute(self):
        print('Computing...')
        self.total_time()
        self.worst_videos()
        self.best_videos()
        self.oldest_videos = self.df[['title', 'webpage_url']].tail(n=10)
        self.oldest_upload = self.df.loc[self.df['upload_date'].idxmin()]
        self.three_randoms()

    def graph(self):
        self.grapher = Grapher(self.df, self.tags)
        self.grapher.average_rating()
        self.grapher.duration()
        self.grapher.views()
        self.grapher.gen_tags_plot()

    def start_analysis(self):
        self.check_df()
        if WordCloud is not None:
            self.make_wordcloud()
        self.compute()
        self.graph()

    def run(self):
        """Main function for downloading and analyzing data."""
        file1 = os.path.join(self.raw, '00001.info.json')
        some_data = os.path.isfile(file1)
        if not some_data:
            self.download_data()
        some_data = os.path.isfile(file1)
        if some_data:
            self.start_analysis()
        else:
            print('No data was downloaded.')
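The class docstring above lays out the whole pipeline. As a minimal, hypothetical driver (assuming Analysis and its dependencies are importable), usage would look like:

analysis = Analysis(path='data')
analysis.run()  # downloads watch history if absent, then computes stats and graphs
print(analysis.formatted_time)  # total watch time in W/D/H/M/S form
print(analysis.top_uploaders)   # the 15 most-watched channels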
Example #24
from grapher import Grapher
from writer import RowWriter, ColumnWriter
from parameters import Parameters

params = Parameters()
params.use_electrosprayed_parameters()

excel_writer = RowWriter()
excel_writer.save_row('Mix,Measurement,Result,Diff')

subtracted_writer = ColumnWriter()

for sheet in params.sheets:
    print('\n======== ' + sheet + ' ========')
    grapher = Grapher()
    grapher.load_sheet_from_excel(params.filename, sheet)
    grapher.set_params(params)

    subtracted_writer.save_column('Wavelength',
                                  grapher.sheet.get_measurements('Wavelength'))

    for mixture in params.mixtures:
        resultMap = {}

        column_names = [
            c for c in list(grapher.sheet.dataframe) if c != 'Wavelength'
        ]
        for column_name in column_names:
            print('\n' + column_name)

            grapher.calc_exp_model('Wavelength', column_name)
Example #25
class Analysis():
    """Main class responsible for downloading and analyzing data.

    Parameters
    ----------
    path : str (default='data')
        The path to the directory where both raw and computed results should be stored.
    delay: float (default=0)
        Amount of time in seconds to wait between requests.

    Attributes
    ----------
    raw : str
        Path to 'raw' directory in self.path directory
    ran : str
        Path to 'ran' directory in self.path directory
    df : Dataframe
        Pandas Dataframe used to store compiled results
    tags : [[str]]
        A list of tags for each downloaded video
    grapher : Grapher
        Creates the interactive graphs portion of the analysis

    seconds : int
        The sum of video durations
    formatted_time : str
        Seconds converted to W/D/H/M/S format
    all_likes : Series
        Video that has the most likes without a single dislike
    most_liked : Series
        Video with the most total likes
    most_viewed : Series
        Video with the most total views
    oldest_videos : Dataframe
        First 10 videos watched on user's account.
    oldest_upload : Series
        Video with the oldest upload date to youtube.
    HD : int
        The number of videos that have high-definition resolution
    UHD : int
        The number of videos that have ultra-high-definition resolution
    top_uploaders : Series
        The most watched channel names with corresponding video counts
    funny_counts : int
        The max number of times a video's description says the word 'funny'
    funny : Series
        The 'funniest' video as determined by funny_counts
    """
    def __init__(self, path='data', delay=0):
        self.path = path
        self.delay = delay
        self.raw = os.path.join(self.path, 'raw')
        self.ran = os.path.join(self.path, 'ran')
        self.df = None
        self.tags = None
        self.grapher = None

        self.seconds = None
        self.formatted_time = None
        self.all_likes = None
        self.most_liked = None
        self.most_viewed = None
        self.oldest_videos = None
        self.oldest_upload = None
        self.HD = None
        self.UHD = None
        self.top_uploaders = None
        self.funny = None
        self.funny_counts = None

    def download_data(self):
        """Uses youtube_dl to download individual json files for each video."""
        print('There\'s no data in this folder. Let\'s download some.')
        successful_login = False
        while not successful_login:
            successful_login = True
            user = input('Google username: ')
            pw = getpass.getpass('Google password: ')  # getpass assumed imported at module level
            files = os.path.join(self.raw, '%(autonumber)s')
            if not os.path.exists(self.raw):
                os.makedirs(self.raw)
            template = ('youtube-dl -u "{}" -p "{}" '
                        '-o "{}" --sleep-interval {} '
                        '--skip-download --write-info-json -i '
                        'https://www.youtube.com/feed/history ')
            fake = template.format(user, '[$PASSWORD]', files, self.delay)
            print(f'Executing youtube-dl command:\n\n{fake}\n')
            cmd = template.format(user, pw, files, self.delay)
            p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
            while True:
                line = p.stdout.readline().decode("utf-8").strip()
                print(line)
                if line == 'WARNING: unable to log in: bad username or password':
                    successful_login = False
                if not line:
                    break

    def df_from_files(self):
        """Constructs a Dataframe from the downloaded json files.

        All json keys whose values are not lists are compiled into the dataframe.
        The dataframe is then saved as a csv file in the self.ran directory.
        The tags of each video are pickled and saved as tags.txt
        """
        print('Creating dataframe...')
        num = len([name for name in os.listdir(self.raw) if not name[0]=='.'])
        files = os.path.join(self.raw, '~.info.json') # This is a weird hack
        files = files.replace('~', '{:05d}') # It allows path joining to work on Windows
        data = [json.load(open(files.format(i))) for i in range(1, num + 1)]

        columns = ['formats', 'tags', 'categories', 'thumbnails']
        lists = [[], [], [], []]
        deletes = {k:v for k, v in zip(columns, lists)}
        for dt in data:
            for col, ls in deletes.items():
                ls.append(dt[col])
                del dt[col]

        self.df = pd.DataFrame(data)
        self.df['upload_date'] = pd.to_datetime(self.df['upload_date'], format='%Y%m%d')
        self.df.to_csv(os.path.join(self.ran, 'df.csv'))

        self.tags = deletes['tags']
        pickle.dump(self.tags, open(os.path.join(self.ran, 'tags.txt'), 'wb'))

    def make_wordcloud(self):
        """Generate the wordcloud file and save it to static/images/."""
        #plt.rcParams['figure.figsize'] = [24.0, 18.0]
        print('Creating wordcloud')
        flat_tags = [item for sublist in self.tags for item in sublist]
        wordcloud = WordCloud(width=1920,
                              height=1080,
                              relative_scaling=.5)
        wordcloud.generate(' '.join(flat_tags))
        wordcloud.to_file(os.path.join('static', 'images', 'wordcloud.png'))

    def check_df(self):
        """Create the dataframe and tags from files if file doesn't exist."""
        if not os.path.exists(self.ran):
            os.makedirs(self.ran)
        df_file = os.path.join(self.ran, 'df.csv')
        if os.path.isfile(df_file):
            self.df = pd.read_csv(df_file, index_col=0, parse_dates=[-11])
            self.tags = pickle.load(open(os.path.join(self.ran, 'tags.txt'), 'rb'))
            self.df['upload_date'] = pd.to_datetime(self.df['upload_date'])
        else:
            self.df_from_files()

    def total_time(self):
        """The amount of time spent watching videos."""
        self.seconds = self.df.duration.sum()
        seconds = self.seconds
        intervals = (
            ('weeks', 604800),  # 60 * 60 * 24 * 7
            ('days', 86400),    # 60 * 60 * 24
            ('hours', 3600),    # 60 * 60
            ('minutes', 60),
            ('seconds', 1)
            )

        result = []

        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip('s')
                result.append("{} {}".format(value, name))
        self.formatted_time = ', '.join(result)

    def worst_videos(self):
        """Finds the lowest rated and most disliked videos"""
        self.df['total_votes'] = self.df['like_count'] + self.df['dislike_count']
        self.df['average_rating'] = self.df['like_count'] / self.df['total_votes']
        df_voted = self.df[self.df['total_votes'] > 0]
        self.lowest_rating = df_voted.loc[df_voted['average_rating'].idxmin()]
        self.most_disliked = self.df.loc[self.df['dislike_count'].idxmax()]

    def best_videos(self):
        """Finds well liked and highly viewed videos"""
        all_likes = self.df[(self.df['like_count'] > 0) & (self.df['dislike_count'] == 0)]
        all_likes = all_likes.sort_values('like_count', ascending=False)
        try:
            self.all_likes = all_likes.iloc[0]
        except IndexError:
            pass
        self.most_liked = self.df.loc[self.df['like_count'].idxmax()]
        self.most_viewed = self.df.loc[self.df['view_count'].idxmax()]

    def funniest_description(self):
        """Counts number of times 'funny' is in each description. Saves top result."""
        funny_counts = []
        descriptions = []
        index = []
        for i, d in enumerate(self.df.description):
            try:
                funny_counts.append(d.lower().count('funny'))
                descriptions.append(d)
                index.append(i)
            except AttributeError:
                pass
        funny_counts = np.array(funny_counts)
        funny_counts_idx = funny_counts.argmax()
        self.funny_counts = funny_counts[funny_counts_idx]
        if self.funny_counts > 0:
            self.funny = self.df.iloc[index[funny_counts_idx]]
        else:
            self.funny = 'Wait, 0? You\'re too cool to watch funny videos on youtube?'

    def three_randoms(self):
        """Finds results for video resolutions, most popular channels, and funniest video."""
        self.HD = self.df[(720 <= self.df.height) & (self.df.height <= 1080)].shape[0]
        self.UHD = self.df[self.df.height > 1080].shape[0]
        self.top_uploaders = self.df.uploader.value_counts().head(n=15)
        self.funniest_description()

    def compute(self):
        print('Computing...')
        self.total_time()
        self.worst_videos()
        self.best_videos()
        self.oldest_videos = self.df[['title', 'webpage_url']].tail(n=10)
        self.oldest_upload = self.df.loc[self.df['upload_date'].idxmin()]
        self.three_randoms()

    def graph(self):
        self.grapher = Grapher(self.df, self.tags)
        self.grapher.average_rating()
        self.grapher.duration()
        self.grapher.views()
        self.grapher.gen_tags_plot()

    def start_analysis(self):
        self.check_df()
        if WordCloud is not None:
            self.make_wordcloud()
        self.compute()
        self.graph()

    def run(self):
        """Main function for downloading and analyzing data."""
        file1 = os.path.join(self.raw, '00001.info.json')
        some_data = os.path.isfile(file1)
        if not some_data:
            self.download_data()
        some_data = os.path.isfile(file1)
        if some_data:
            self.start_analysis()
        else:
            print('No data was downloaded.')
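This revision of Example #22 threads a per-request delay through to youtube-dl's --sleep-interval flag and echoes the command with the password masked before running it. A hypothetical invocation with a two-second delay:

analysis = Analysis(path='data', delay=2)  # wait 2 s between requests
analysis.run()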
Example #26
    utils.exit_unless_accessible(args.calls)
    utils.setup_logging(verbosity=args.verbose)

    # Load graph database (remove duplicates)
    df_all = df_from_csv_file(args.calls)
    df = df_all.drop_duplicates()

    from_fun, to_fun = args.from_function, args.to_function
    left, right = search_settings(args.direction, args.cutoff)

    merge_on = ["caller_filename", "caller_function", "callee_filename", "callee_function"]
    chains_df_right = pd.DataFrame(columns=merge_on)
    if right:
        chains_df_right = find_chains_directed_df(df, from_fun, to_fun, right)

    chains_df_left = pd.DataFrame(columns=merge_on)
    if left:
        chains_df_left = find_chains_directed_df(df, from_fun, to_fun, left)

    _LOGGER.info("Generating the results...")
    df_chains = pd.concat([chains_df_left, chains_df_right]).drop_duplicates()
    df_chains = pd.merge(df_all, df_chains, on=merge_on, how='inner')
    if args.out.endswith(".csv"):
        df_to_csv_file(df_chains, args.out)
    else:
        grapher = Grapher(args.out)
        grapher.graph(df_chains)
        grapher.render(args.out)

    _LOGGER.info("Done")
Example #27
from congress_data_cleaner import CongressDataCleaner
from grapher import Grapher
from plotter import Plotter
'''
Example script for running full analysis on govtrack cosponsor dataset
'''

congress_data_cleaner = CongressDataCleaner(
    'data/govtrack_cosponsor_data/raw', 'data/govtrack_cosponsor_data/clean/',
    'data/party_affiliation/')
congress_data_cleaner.clean_all_data()

grapher = Grapher('data/govtrack_cosponsor_data/clean/full/',
                  'analysis/govtrack_cosponsor_data')
grapher.get_all_counts()

plotter = Plotter('analysis/govtrack_cosponsor_data/counts',
                  'plots/govtrack_cosponsor_data', 100, 20)
plotter.plot_all()
Example #28
    lines = []
    updatexy(t)
    p[2] = y + 40
    p[1] = x
    test.update_destination_point(p, s)
    lines = to_lines(test.joint_points)
    lines2 = to_lines(test.foward_model(test.joint_angle_only))

    for _line in lines2:
        lines.append(_line)
    g.redraw(lines)
    #    sys.stdout.write("\r" + test.return_model())
    print(test.return_model_for_low_level())
    test.send_serial()
    sys.stdout.flush()


test = RoverArm([50, 40, 15])
test.update_destination_point([40, 0, 40], [1, 0, 0])
test.ros_begin()
test.send_serial()
lines = to_lines(test.joint_points)

# test.establish_serial_connection()
# test.serial_write()

g = Grapher(lines)
g.redraw(lines)
while not test.my_rospy.is_shutdown():
    g.show(anim)
Example #29
from flask_script import Manager, Server
from grapher import Grapher
from vertex import settings

if __name__ == "__main__":
    grapher = Grapher(__name__)

    manager = Manager(grapher.app)
    manager.add_command('runserver', Server(port=80))

    manager.run()