Example No. 1
def test():
    try:
        path = "./test-data/example.pkl"
        directory, _ = os.path.split(path)
        save(path, {"some": list(range(100))})
        ob = load(path)
        assert ob["some"] == list(range(100)), 'object should be correct'
        # save again
        save(path, {"some": list(range(200))})
        ob = load(path)
        assert ob["some"] == list(range(200)), 'object should be correct'

        # Now turn on the `no_overwrite` flag
        has_erred = False
        try:
            save(path, {"some": list(range(200))}, no_overwrite=True)
        except Exception:
            has_erred = True
        if not has_erred:
            raise Exception('Should raise a file-already-exists error.')
    except Exception as e:
        raise e
    finally:
        # clean up afterward.
        shutil.rmtree("./test-data")
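The `save` and `load` helpers exercised by this test are not shown here. A minimal sketch consistent with what the test asserts (pickle round-trip, silent overwrite by default, and `no_overwrite=True` refusing to replace an existing file) could look like the following; the function names match the snippet, everything else is an assumption.

import os
import pickle


def save(path, obj, no_overwrite=False):
    """Pickle `obj` to `path`, creating parent directories as needed (assumed behaviour)."""
    if no_overwrite and os.path.exists(path):
        raise FileExistsError(path + " already exists")
    directory, _ = os.path.split(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(obj, f)


def load(path):
    """Unpickle and return the object stored at `path`."""
    with open(path, "rb") as f:
        return pickle.load(f)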
Example No. 2
    def __init__(self, center, end_timer=False, breakable=False, damage=0):
        """
        Create the wall and its location
        :param center: the center of the sprite
        :param image: a pygame surface associated with the wall's texture
        :param damage_player: Boolean. True if touching the wall hurts the player
        :param end_timer: Boolean. True if touching the wall ends the game timer.
        :param breakable: Boolean. True if the player's explosives can destroy the wall.
        :param damage: Int. The amount of damage per tick to deal.
        """
        super().__init__()

        self.end_timer = end_timer
        self.breakable = breakable
        self.damage = damage

        if breakable:
            self.image = h.load('broken_stone.png')
        elif damage:
            self.image = h.load('spikes.png')
        else:
            self.image = h.load('stone.png')

        self.rect = self.image.get_rect()
        self.rect.center = center
Example No. 3
 def __get_eigenface_file_from_image(cls, model_file, image_file, size):
     model = helpers.load(model_file)
     im = helpers.get_nparray_from_img(image_file, size)
     eigenface = model.transform([im])
     eigenface_file = helpers.get_temp_filename()
     helpers.dump(eigenface, eigenface_file, compress_level=3)
     return eigenface_file
Example No. 5
def pre_process_task2():

    # imageId to Index
    IdsIdxMap = hp.load("IdsIdxMap.txt")

    # Index to Image Id
    IdxIdsMap = hp.load("IdxIdsMap.txt")
    b = hp.load("ImageImageSimilarityMatrix"+ str(k) +".txt")

    # adjacency matrix (binary) and edge-weighted adjacency matrix
    adj_matrix = np.zeros((len(IdxIdsMap), len(IdxIdsMap)))
    weighted_adj_matrix = np.zeros((len(IdxIdsMap), len(IdxIdsMap)))

    for i in range(len(adj_matrix)):
        q = b[IdxIdsMap[i]]
        for value in q:
            adj_matrix[i][IdsIdxMap[value]] = 1.0
            weighted_adj_matrix[i][IdsIdxMap[value]] = q[value]

    A = np.array(adj_matrix)
    W = np.array(weighted_adj_matrix)

    D_out = np.eye(A.shape[0]) * np.sum(A, axis=1, keepdims=True)
    D_out = np.sqrt(pinv(D_out))
    D_out = scipy.sparse.csr_matrix(D_out)

    D_in = np.eye(A.shape[0]) * np.sum(A, axis=0, keepdims=True)
    D_in = np.sqrt(pinv(D_in))
    D_in = scipy.sparse.csr_matrix(D_in)

    # Degree-normalized co-link matrices:
    # B = D_out^(-1/2) A D_in^(-1/2) A^T D_out^(-1/2)  (hub-style similarity)
    # C = D_in^(-1/2) A^T D_out^(-1/2) A D_in^(-1/2)   (authority-style similarity)
    B = csr_matrix.dot(csr_matrix.dot(csr_matrix.dot(csr_matrix.dot(D_out, A), D_in), A.T), D_out)

    C = csr_matrix.dot(csr_matrix.dot(csr_matrix.dot(csr_matrix.dot(D_in, A.T), D_out), A), D_in)

    sparse_A_matrix = scipy.sparse.csr_matrix(A)
    scipy.sparse.save_npz(os.getcwd()+'/data/A_'+str(k)+'.npz', sparse_A_matrix)

    sparse_W_matrix = scipy.sparse.csr_matrix(W)
    scipy.sparse.save_npz(os.getcwd()+'/data/W_'+str(k)+'.npz', sparse_W_matrix)

    sparse_B_matrix = scipy.sparse.csr_matrix(B)
    scipy.sparse.save_npz(os.getcwd()+'/data/B_'+str(k)+'.npz', sparse_B_matrix)

    sparse_C_matrix = scipy.sparse.csr_matrix(C)
    scipy.sparse.save_npz(os.getcwd()+'/data/C_'+str(k)+'.npz', sparse_C_matrix)
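As a quick sanity check of the normalization above, the same construction on a tiny dense adjacency matrix (a sketch using plain numpy; the 3-node graph is made up):

import numpy as np
from numpy.linalg import pinv

A = np.array([[0., 1., 1.],
              [0., 0., 1.],
              [1., 0., 0.]])

D_out = np.sqrt(pinv(np.eye(A.shape[0]) * np.sum(A, axis=1, keepdims=True)))  # D_out^(-1/2)
D_in = np.sqrt(pinv(np.eye(A.shape[0]) * np.sum(A, axis=0, keepdims=True)))   # D_in^(-1/2)

B = D_out @ A @ D_in @ A.T @ D_out  # hub-style similarity
C = D_in @ A.T @ D_out @ A @ D_in   # authority-style similarity
print(B.round(3))
print(C.round(3))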
Example No. 6
 def setUpClass(cls):
     cls.bc, cls.ffi = load("main", "definitions.h")
     cls.nullptr = cls.ffi.NULL
     cls.usart_eot = int.to_bytes(cls.bc.USART_EOT, 1, byteorder="little")
     cls.usart_eot = cls.usart_eot * cls.bc.USART_EOT_COUNT
     cls.bc.SYSTEM_run = False
     cls.bc.test()
     cls.bc.SYSTEM_status = cls.bc.SYSTEM_STATUS_OPERATIONAL
     write_usart(cls.bc, 0x88, b"")  # ping
Example No. 7
    def fw(self,
           iterations,
           keep_track=False,
           need_restart=False,
           backup_file=None):
        """
        iterations is an integer denoting the number of iterations in the
        Frank-Wolfe algorithm.
        """
        if need_restart:
            backup = load(backup_file)  # backup is a list in the format [y, track, bases]
            x0 = backup[0].copy()
            track = backup[1].copy()
            bases = backup[2]
            i = max(track)
            time_passed = track[i][0]
        else:
            x0 = self.initial_point.copy()
            time_passed = 0.0
            track = dict()
            bases = []
            i = 0

        y = x0.copy()
        gamma = 1.0 / iterations

        logging.info('Starting Frank-Wolfe...')
        for t in range(i, iterations):
            start = time()
            logging.info('iteration #' + str(t) + "\n")
            gradient = self.estimator.estimate(y)[0]
            mk = self.linear_solver.solve(gradient)  # finds maximum
            # sys.stderr.write("mk: " + str(mk))
            try:
                for value in mk.values():  # updates y
                    for i in value:
                        y[i] += gamma
                # y = {i: y[i] + gamma for value in mk.values() for i in value}
            except AttributeError:
                for i in mk:
                    y[i] += gamma
                # y = {i: y[i] + gamma for i in mk}
            if keep_track or t == iterations - 1:
                time_passed += time() - start
                new_y = y.copy()
                track[t] = (time_passed, new_y)
            bases.append(mk)
            # sys.stderr.write("y: " + str(y))
            save(backup_file, [y, track, bases])
            print("y after iteration %d is: " % t + str(y))
        return y, track, bases
Example No. 8
    def drop_bomb(self):
        """
        Drop a bomb that destroys surrounding blocks and damages enemies.

        :returns bomb: A bomb entity
        """
        if not settings['GOD MODE']:
            self.bombs -= 1
        if self.last_motion == "right":
            x = 13
        elif self.last_motion == "left":
            x = -13
        else:
            x = 0
        bomb = entities.Bomb(h.load('bomb.png'), self.rect.center, x, -20, self.bomb_range)
        return bomb
Example No. 9
    def __init__(self, seed=None):
        """
        Create the room based on a certain room_array

        room_array is a list of strings that will be rendered into the room

        :param seed: The seed to use to generate the world. Passed from
            the generateworld() operation to allow for users to save everything
            about a room.
        """

        self.logger = logging.getLogger('mineEye.world.World')

        self.seed = seed
        random.seed(self.seed)

        self.run_timer = True

        self.all_sprites = pygame.sprite.Group()
        self.block_list = pygame.sprite.Group()
        self.spikes_list = pygame.sprite.Group()
        self.drops_list = pygame.sprite.Group()
        self.enemy_list = pygame.sprite.Group()
        self.enemy_projectile_list = pygame.sprite.Group()
        self.hero_projectile_list = pygame.sprite.Group()
        self.bomb_list = pygame.sprite.Group()

        self.nodes = h.Graph()

        self.background_string = 'background.png'
        self.background = h.create_background(h.load(self.background_string))
        self.region = None

        self.weapon_factor = 6  # tenths of a percent chance of spawning a weapon at a given node

        self.xspeed = 0
        self.yspeed = 0

        self.xshift = 0
        self.yshift = 0

        self.base_y_gravity = -3
        self.gravity_acceleration = -1

        self.room_array = []

        self.array_parsed = False
Example No. 10
def main():

    if not os.path.isfile('parameters.p'):
        # Load images for camera calibration
        cal_images = glob.glob("./camera_cal/calibration*.jpg")

        # Calibrate camera. Do this only once.
        mtx, dist = helpers.calibrate_camera(cal_images, nx=9, ny=5)

        # Get M and Minv
        img = mpimg.imread('./test_images/test1.jpg')
        img_undist = cv2.undistort(img, mtx, dist, None, mtx)
        warped_img, M = helpers.warp(img_undist)
        unwarped_img, Minv = helpers.unwarp(warped_img, img_undist.shape)

        persistables = {'mtx': mtx, 'dist': dist, 'M': M, 'Minv': Minv}
        helpers.persist('parameters.p', persistables, './')
    else:
        params = helpers.load('parameters.p', './')
        mtx = params['mtx']
        dist = params['dist']
        M = params['M']
        Minv = params['Minv']

    # Ok, now we have M and Minv which we will use for image warping and unwarping in all frames
    process = lambda image: process_image(image, mtx, dist, M, Minv)

    #output = 'project_video_result.mp4'
    #clip1 = VideoFileClip("project_video.mp4")

    output = 'challenge_video_result.mp4'
    clip1 = VideoFileClip("challenge_video.mp4")

    #output = 'harder_challenge_video_result.mp4'
    #clip1 = VideoFileClip('harder_challenge_video.mp4')

    clip = clip1.fl_image(process)

    print("start video processing...")
    clip.write_videofile(output, audio=False)
Example No. 11
    def draw(self, screen):
        """
        Draw everything in the room

        :param screen: A pygame surface to blit everything onto.
        """
        if self.background.get_size() != screen.get_size():
            self.background = h.create_background(h.load(self.background_string))
        screen.blit(self.background, (0, 0))

        if not self.array_parsed:
            self.parse_room_array()
            self.add_weapons_to_world()

        for e in self.enemy_list:
            e.draw(screen)

        self.block_list.draw(screen)
        self.spikes_list.draw(screen)
        self.drops_list.draw(screen)
        self.enemy_projectile_list.draw(screen)
        self.hero_projectile_list.draw(screen)
        self.bomb_list.draw(screen)
Example No. 12
#     estimated_fun = sum(estimated_fun)
#     return estimated_fun

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Test Module for ...',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--problem',
        type=str,
        help='If the problem instance is created before, provide it here to save'
        ' time instead of recreating it.')
    args = parser.parse_args()
    num_of_samples = 100
    # graphs = load('datasets/one_graph_file')
    newProblem = load(args.problem)  # InfluenceMaximization(graphs, 1)

    # print("g^2(x) is: " + str((g**2).coefficients))

    # x = sp.Symbol('x')
    # f = sp.ln(x + 1)
    # x_lims = [0, 1]
    vectors = []
    for n in range(num_of_samples):
        # list(...) is needed in Python 3, where map() returns an iterator without len()
        binary_vector = list(
            map(int, bin(random.getrandbits(newProblem.problemSize))[2:]))
        if len(binary_vector) < newProblem.problemSize:
            binary_vector = [0] * (newProblem.problemSize -
                                   len(binary_vector)) + binary_vector
        # random_list = random.getrandbits(newProblem.problemSize)
        y = dict(zip(newProblem.groundSet, binary_vector))
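For reference, the random binary vector built above via `bin(random.getrandbits(...))` plus zero-padding can be drawn more directly. A self-contained sketch using numpy (an alternative, not what the snippet does; the sizes are placeholders):

import numpy as np

problem_size = 10                        # stands in for newProblem.problemSize
ground_set = list(range(problem_size))   # stands in for newProblem.groundSet

binary_vector = np.random.randint(0, 2, size=problem_size).tolist()
y = dict(zip(ground_set, binary_vector))
print(y)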
Example No. 13
 def __init__(self, config):
     self.config = config
     (self.helpers, self.mods) = helpers.load(config)
     self.browser = browser.SingletonBrowser()
Example No. 14
 def setUpClass(cls):
     cls.bc, cls.ffi = load("main", "definitions.h")
     cls.nullptr = cls.ffi.NULL
Example No. 15
import flask
from flask import render_template
import helpers

app = flask.Flask(__name__)
helpers.load(app)

@app.route("/")
def home():
    return render_template("home.html")

@app.route("/saludar/<nombre>")
def saludar(nombre):
    return "Hola <b>{nombre}</b> como estas?".format(nombre=nombre)

if __name__ == '__main__':
    app.run(debug=True)
Example No. 16
import tensorflow as tf
import numpy as np
import time
import gym
import helpers

# DONE: produce learned trajectory from expert
# DONE: learn from expert trajectory
# DONE: visualization and demos (in jupyter notebook)

# Try to load the data
try:
    expert_data = helpers.load('./imitation-data/cartpole_vpc_expert.tau')
    print('expert data is loaded!')
except Exception as e:
    raise Exception('Cannot load expert data.') from e


def policy_gradient():
    """usage:

    when running episode: (inside run_episode)
        sess_2.run(action_p, feed_dict={state: [s]})

    when running optimization:
        sess_2.run(optimizer, feed_dict={state: states, actions: [actions]})
    """
    with tf.variable_scope("policy"):
        # Placeholders
        state_placeholder = tf.placeholder("float", [None, 4])
        actions_placeholder = tf.placeholder("int32", [None])
Example No. 17
    #X, track = GREEDY(P)
    #print track[-2]
    logging.info('Greedy Algorithm is initiated...')
    directory_output = "results/greedy/"
    if not os.path.exists(directory_output):
        os.makedirs(directory_output)
    logging.info('...output directory is created...')
    #outputfile = problem_instance
    #output = dir_output+outputfile
    #np.save(output,track)

    if args.problem_type == "IM":
        logging.info('Problem Type is selected as Influence Maximization...')
        problem_instance = "IM_" + "epinions100_recall"
        logging.info('Loading graphs list...')
        graphs = load("datasets/test_graphs_file")
        # new_graph = DiGraph()
        # new_graph.add_nodes_from([1, 2, 3, 4, 5, 6])
        # new_graph.add_edges_from([(1, 2), (1, 3), (1, 4), (2, 3), (3, 4), (4, 5), (4, 6), (6, 3)])
        # graphs = [new_graph]
        # sys.stderr.write("graphs is: " + str(graphs))
        logging.info(
            '...done. Initiating the Influence Maximization Problem...')
        newProblem = InfluenceMaximization(graphs, 3)
        logging.info('...done. Starting the greedy algorithm...')
        y, track = greedy(newProblem)
        logging.info('...done.')
        output = directory_output + problem_instance
        logging.info('Saving the results of the greedy algorithm...')
        save(output, track)
        logging.info('...done. Simulation is finished.')
Example No. 18
 def setUpClass(cls):
     cls.bc, cls.ffi = load("main", "definitions.h")
     cls.nullptr = cls.ffi.NULL
     cls.bc.SYSTEM_run = False
     cls.bc.test()
     random.seed()
Example No. 19
import argparse
import datetime
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

if __name__ == "__main__":
    path = "results/continuous_greedy/IM/epinions_50"  # "results/continuous_greedy/FL/ratings_10"
    files = os.listdir(path)
    for file in files:
        plt.figure()
        # Each result file contains lines of the form
        # (y, F(y), time_passed, FW_iterations, estimator_type, degree, center) or
        # (y, F(y), time_passed, FW_iterations, estimator_type, num_of_samples).
        result = load(path + '/' + file)
        solutions = []  # fractional vectors y
        objectives = []  # F(y) where F is the multilinear relaxation, or F^(y) where F^ is the best estimator
        time = []  # time it took to compute the fractional vector y
        FW_iterations = []
        degree = []  # degree of the polynomial estimator
        center = []  # point where the polynomial estimator is centered
        samples = []  # number of samples used in the sampler estimator
        for item in result:
            solutions.append(item[0])
            objectives.append(item[1])
            time.append(np.log(item[2]))
            FW_iterations.append(item[3])
Example No. 20
def main(argv):

    try:
        opts, args = getopt.getopt(argv, "hdt:p::",
                                   ["dataset", "train", "process"])
        for opt, arg in opts:
            if opt == '-h':
                usage()
                sys.exit()
            elif opt in ('-d', '--dataset'):
                dg.generate_datasets()
            elif opt in ('-t', '--train'):
                classifier.train_models([opt])
            elif opt in ('-p', '--process'):
                # load the params needed for image un-distortion
                params = helpers.load('parameters.p', './')
                mtx = params['mtx']
                dist = params['dist']
                M = params['M']
                Minv = params['Minv']

                LeftLine = line.Line(max_lines=3)
                RightLine = line.Line(max_lines=3)

                # Load svm model and scaler
                model = joblib.load(arg)
                clf = model['clf']
                x_scaler = model['scaler']

                # stores boxes of the last n frames
                box_deque = collections.deque(maxlen=10)

                search_windows = [(1.5, [400, 560], [0, 400], "Img 96x96"),
                                  (2, [400, 672], [0, 360], "Img 128x128"),
                                  (2.5, [432, None], [0, 100], "Img 160x160")]

                process = lambda image: process_image(image,
                                                      mtx,
                                                      dist,
                                                      M,
                                                      Minv,
                                                      LeftLine,
                                                      RightLine,
                                                      clf,
                                                      x_scaler,
                                                      box_deque,
                                                      search_windows,
                                                      threshold=5)

                if False:
                    test_images = glob.glob('test_images/*.jpg')
                    test_images.sort(key=os.path.getmtime)
                    fig = plt.figure(figsize=(20, 12))
                    tstart = time.time()
                    for img_name in test_images[1:3]:
                        img = mpimg.imread(img_name)
                        detect_cars(img,
                                    clf,
                                    x_scaler,
                                    box_deque,
                                    search_windows,
                                    plot=True,
                                    threshold=1)
                    tend = time.time()
                    print("duration: ", round(tend - tstart, 5))
                else:
                    #output = 'project_video_processed.mp4'
                    #clip = VideoFileClip("project_video.mp4")
                    output = 'test_video_processed.mp4'
                    clip = VideoFileClip("test_video.mp4")
                    output_clip = clip.fl_image(process)
                    output_clip.write_videofile(output, audio=False)

    except getopt.GetoptError:
        usage()
        sys.exit(2)

    sys.exit()
Example No. 21
import helpers as hp
import SpareMatrixPR as SMPR
import ShowImages as si
import configparser as cp
import os

config = cp.ConfigParser()
config.read(os.getcwd() + '/config.ini')

k = input("Enter no. of dominant images: ")

# adjacency Matrix saved as part of task 1
adj_matrix = hp.load("adj_matrix.txt")

# imageId to Index
IdsIdxMap = hp.load("IdsIdxMap.txt")

# Index to Image Id
IdxIdsMap = hp.load("IdxIdsMap.txt")

pr2 = SMPR.pagerank_scipy(adj_matrix, alpha=0.85, tol=1.0e-10)

imageidx = pr2.argsort()[-int(k):][::-1]

imageidxList = []

for idx in imageidx:
    imageidxList.append(IdxIdsMap[idx])

devset_path = config['TASK3']['devset_path']
si.fetchImages(imageidxList, devset_path)
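The top-k selection idiom used above (`argsort()[-k:][::-1]`) in isolation, on made-up scores:

import numpy as np

scores = np.array([0.05, 0.35, 0.10, 0.25, 0.30])  # hypothetical PageRank scores
k = 3
top_k = scores.argsort()[-k:][::-1]  # indices of the k largest scores, best first
print(top_k)  # -> [1 4 3]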
Example No. 22
async def ftd_command(ctx, arg="", arg2="", arg3=""):
    """Displays barchart of fail of deliver data of a given stock and sends it

    Parameters
    -----------
    arg: str
        ticker, -h or help
    arg2:
        date (in date format for start date)
    arg3:
        date (in date format for end date)

    Returns
    -------
    discord message
        Sends a message containing an image displaying a barchart fail to
        deliver data to the user during a given time period
    """

    try:
        # Debug
        if cfg.DEBUG:
            print(f"!stocks.dps.ftd {arg} {arg2} {arg3}")

        # Help
        if arg == "-h" or arg == "help":
            help_txt = (
                "Display fails-to-deliver data for a given ticker. [Source: SEC]\n"
            )
            help_txt += "\nPossible arguments:\n"
            help_txt += "<TICKER> Stock ticker. REQUIRED!\n"
            help_txt += "<DATE_START> Start of data. Default: 1 year ago\n"
            help_txt += "<DATE_END> End of data. Default: today\n"
            embed = discord.Embed(
                title="Stocks: [SEC] Failure-to-deliver HELP",
                description=help_txt,
                colour=cfg.COLOR,
            )
            embed.set_author(
                name=cfg.AUTHOR_NAME,
                icon_url=cfg.AUTHOR_ICON_URL,
            )

        else:
            if arg == "":
                title = "ERROR Stocks: [SEC] Failure-to-deliver"
                embed = discord.Embed(title=title, colour=cfg.COLOR)
                embed.set_author(
                    name=cfg.AUTHOR_NAME,
                    icon_url=cfg.AUTHOR_ICON_URL,
                )
                embed.set_description(
                    "No ticker entered.\nEnter a valid ticker, example: GME")
                await ctx.send(embed=embed)
                if cfg.DEBUG:
                    print("ERROR: No ticker entered")
                return

            # Parse argument
            ticker = arg.upper()
            if arg2 == "":
                start = datetime.now() - timedelta(days=365)
            else:
                try:
                    start = datetime.strptime(arg2, cfg.DATE_FORMAT)
                except Exception as e:
                    title = "ERROR Stocks: [SEC] Failure-to-deliver"
                    embed = discord.Embed(title=title, colour=cfg.COLOR)
                    embed.set_author(
                        name=cfg.AUTHOR_NAME,
                        icon_url=cfg.AUTHOR_ICON_URL,
                    )
                    embed.set_description(
                        f"Start Date given: {arg2}"
                        f"\nEnter a valid start date using the format: {cfg.DATE_FORMAT}"
                    )
                    await ctx.send(embed=embed)
                    if cfg.DEBUG:
                        print(
                            f"ERROR: Wrong start date parameter entered\n{e}")
                    return
            if arg3 == "":
                end = datetime.now()
            else:
                try:
                    end = datetime.strptime(arg3, cfg.DATE_FORMAT)
                except Exception as e:
                    title = "ERROR Stocks: [SEC] Failure-to-deliver"
                    embed = discord.Embed(title=title, colour=cfg.COLOR)
                    embed.set_author(
                        name=cfg.AUTHOR_NAME,
                        icon_url=cfg.AUTHOR_ICON_URL,
                    )
                    embed.set_description(
                        f"End Date given: {arg3}"
                        f"\nEnter a valid end date using the format: {cfg.DATE_FORMAT}"
                    )
                    await ctx.send(embed=embed)
                    if cfg.DEBUG:
                        print(f"ERROR: Wrong end date parameter entered\n{e}")
                    return

            plt.ion()
            try:
                ftds_data = sec_model.get_fails_to_deliver(
                    ticker, start, end, 0)
            except Exception as e:
                title = f"ERROR Stocks: [SEC] Failure-to-deliver {ticker}"
                embed = discord.Embed(title=title, colour=cfg.COLOR)
                embed.set_author(
                    name=cfg.AUTHOR_NAME,
                    icon_url=cfg.AUTHOR_ICON_URL,
                )
                embed.set_description(f"Ticker given: {arg}"
                                      "\nEnter a valid ticker, example: GME")
                await ctx.send(embed=embed)
                if cfg.DEBUG:
                    print(
                        f"POSSIBLE ERROR: Wrong ticker parameter entered\n{e}")
                return
            plt.bar(
                ftds_data["SETTLEMENT DATE"],
                ftds_data["QUANTITY (FAILS)"] / 1000,
            )
            plt.ylabel("Shares [K]")
            plt.title(f"Fails-to-deliver Data for {ticker}")
            plt.grid(b=True,
                     which="major",
                     color="#666666",
                     linestyle="-",
                     alpha=0.2)
            plt.gca().xaxis.set_major_formatter(
                mdates.DateFormatter("%Y/%m/%d"))
            plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=7))
            plt.gcf().autofmt_xdate()
            plt.xlabel("Days")
            _ = plt.gca().twinx()
            stock = helpers.load(ticker, start)
            stock_ftd = stock[stock.index > start]
            stock_ftd = stock_ftd[stock_ftd.index < end]
            plt.plot(stock_ftd.index,
                     stock_ftd["Adj Close"],
                     color="tab:orange")
            plt.ylabel("Share Price [$]")
            plt.savefig("dps_ftd.png")
            plt.close("all")
            uploaded_image = gst_imgur.upload_image("dps_ftd.png",
                                                    title="something")
            image_link = uploaded_image.link
            title = "Stocks: [SEC] Failure-to-deliver " + ticker
            embed = discord.Embed(title=title, colour=cfg.COLOR)
            embed.set_author(
                name=cfg.AUTHOR_NAME,
                icon_url=cfg.AUTHOR_ICON_URL,
            )
            embed.set_image(url=image_link)
            os.remove("dps_ftd.png")

        await ctx.send(embed=embed)

    except Exception as e:
        title = "INTERNAL ERROR"
        embed = discord.Embed(title=title, colour=cfg.COLOR)
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        embed.set_description(
            "Try updating the bot, make sure DEBUG is True in the config "
            "and restart it.\nIf the error still occurs open a issue at: "
            "https://github.com/GamestonkTerminal/GamestonkTerminal/issues"
            f"\n{e}")
        await ctx.send(embed=embed)
        if cfg.DEBUG:
            print(e)
Example No. 23
 def load_options(self, args):
     super(FindSimilarsMapReduce, self).load_options(args)
     self.new_eigenface = helpers.load(
         getattr(self.options, FACE_OPTION[2:]))
     self.max_resutls = getattr(self.options, MAX_RESULTS_OPTION[2:])
Example No. 24
 def __init__(self, center):
     self.sprite = DropSprite(h.load(self.sprite_to_load), center, self)
     self.sprite.is_weapon = True
     self.top_sprite = TopSprite(h.load(self.top_sprite_to_load), self)
Example No. 25
import tensorflow_datasets as tfds

from helpers import Word_Index, one_hot, load

tokenizer = tfds.features.text.Tokenizer()
word_index = Word_Index()
model = load()

review = "best movie best actors high praise"
tokenized_review = tokenizer.tokenize(review)
encoded_review = [word_index.get_index(word) for word in tokenized_review]
one_hot_review = one_hot([encoded_review])

prediction = model.predict(one_hot_review)
print(prediction)
Example No. 26
plot_power_consumption = True

save_figures = False  # save  the figures that are plotted
show_figures = True  # show  the figures that are plotted

# code execution

# LOAD THE LOGGING DATA (OUTPUT)
run_counter = 0
filenames_output = ["BM_AS10_200812T153942_Nokken_Hs2_7s_Perp_3sigma_nofilter.csv", "BM_AS10_200812T151056_Nokken_Hs2_7s_Perp_3sigma_nofilter.csv", "BM_AS10_200812T152442_Nokken_Hs2_7s_Perp_3sigma_nofilter.csv"]
pathname=[]
for fn in filenames_output:
    pathname.append(os.path.join(initialdiroutput, fn))


df_log = helpers.load(pathname, save_to_feather=False, initialdir=initialdiroutput)

# LOAD THE SOURCE DATA (INPUT)
# filename = filedialog.askopenfilename(initialdir=initialdirinput,
#                                       title="Select data files",
#                                       filetypes=(("xlsx files", "*.xlsx"), ("all files", "*.*")))

filename_input = "NORMAND_Tp070_Hs200_Hdg000_Perpendicular.xlsx"
pathname_input = os.path.join(initialdirinput, filename_input)

df_source = pd.read_excel(pathname_input)
timestamps = []
for i in range(df_source.shape[0]):
    timestamp = pd.Timestamp(df_log['Timestamp'].values[0] + pd.Timedelta(seconds=df_source['TIME'].values[i]+15.6))
    timestamps.append(timestamp)
Example No. 27
n = pypsa.Network(snakemake.input.network)
regions = gpd.read_file(snakemake.input.regions).set_index('name')
cost = (combine_oneports(
    xr.open_dataset(snakemake.input.costs).sum('snapshot')).set_index({
        'branch': ['component', 'branch_i']
    }).transpose('sink', 'source', 'source_carrier', 'branch'))

if 'price' in snakemake.output[0]:
    if any(n.snapshot_weightings.objective != 1):
        w = ntl.cost.snapshot_weightings(n)
        cost['one_port_operational_cost'] /= w
        cost['co2_cost'] /= w
    payment_type = 'Average Price for \n'
    unit = '€/MWh'
    demand = load(n).sum('snapshot')
    cost = cost / demand
    fmt = None

else:
    payment_type = 'Allocated'
    unit = '€'
    fmt = scfmt

payer = cost.sum(['source', 'branch']).reindex(sink=regions.index).fillna(0)
receiver = cost.sum('sink').reindex(source=regions.index).fillna(0)

res = '50m' if 'test' in snakemake.output[0] else '10m'
nplot_kwargs = dict(bus_colors='rosybrown',
                    geomap=res,
                    line_widths=0,
Example No. 28
            if root in files:
                files[root].append(f)
            else:
                files[root] = [f]


    if args.type == 'SEEDSvsUTILITY':
        seeds = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
        utility1 = []
        utility2 = []
        for seed in seeds:
            path1 = 'results/continuous_greedy/IM/epinions_100_10cascades/k_' + str(seed) \
                    + '_100_FW/polynomial_degree_1_around_05'
            path2 = 'results/continuous_greedy/IM/epinions_100_10cascades/k_' + str(
                seed) + '_100_FW/polynomial_degree_2_around_05'
            result1 = load(path1)
            result2 = load(path2)
            utility1.append(result1[-1][3])
            utility2.append(result2[-1][3])
        plt.figure()
        plt.plot(seeds, utility1, 's', label='Polynomial Estimator degree 1')
        plt.plot(seeds, utility2, 's', label='Polynomial Estimator degree 2')
        plt.title("Number of seeds vs utility")
        plt.xlabel("Constraints")
        plt.ylabel("f^(y)")
        ax = plt.gca()
        handles, labels = ax.get_legend_handles_labels()
        # sort both labels and handles by labels
        labels, handles = zip(*sorted(zip(labels, handles), key=lambda l: l[0]))
        ax.legend(handles, labels)
        plt.show()
Example No. 29
    parser.add_argument(
        '--center',
        default=0.5,
        type=float,
        help='The point around which Taylor approximation is calculated')
    parser.add_argument(
        '--samples',
        default=500,
        type=int,
        help='Number of samples used to calculate the sampler estimator')
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    if args.problem is not None:
        newProblem = load(args.problem)
        args.problemType = args.problem.split("_")[0].split("/")[-1]
        args.input = args.problem.split("_")[1] + "_" + args.problem.split(
            "_")[2] + "_" + args.problem.split("_")[3]
        args.constraints = int(args.problem.split("_")[-1])

    else:
        if args.problemType == 'DR':
            rewards = load(args.rewardsInput)
            givenPartitions = load(args.partitionsInput)
            types = load(args.typesInput)
            k_list = load(args.constraints)
            newProblem = DiversityReward(rewards, givenPartitions, types,
                                         k_list)

        elif args.problemType == 'QS':
Example No. 30
 def load_options(self, args):
     super(FindSimilarsMapReduce, self).load_options(args)
     self.new_eigenface = helpers.load(getattr(self.options, FACE_OPTION[2:]))
     self.max_resutls = getattr(self.options, MAX_RESULTS_OPTION[2:])