Example #1
    def train(self, training_data, validation_data):
        if not self.initialize_vars:
            self.sess = tf.Session()
            self.sess.run(tf.global_variables_initializer())
            self.initialize_vars = True
        actions, states, frames = training_data
        # print(type(frames), frames.shape)
        for e in range(self.EPOCHS):
            # do some training
            for i in range(10):
                train_loss, _ = self.sess.run(
                    [self.total_loss, self.opt], {
                        self.in_action: actions,
                        self.in_state: states,
                        self.in_frame: frames
                    })
                self.train_losses.append(train_loss)
            print("training loss: ", np.mean(self.train_losses))
            # do some validation
            validation_loss = self.sess.run(
                [self.total_loss], {
                    self.in_action: validation_data[0],
                    self.in_state: validation_data[1],
                    self.in_frame: validation_data[2]
                })
            self.valid_losses.append(validation_loss)
            print("validation loss: ", np.mean(self.valid_losses))
        util.save(SAVE_GRAPH_NAME, session=self.sess)
        self.train_losses = []
        self.valid_losses = []
Example #2
def main():
    # get argument
    args = util.get_args()

    # print argument
    util.print_args(args)

    # run te
    mlus = []
    rcs = []

    # at every T step
    for t in range(args.num_test):
        if t % args.T == 0:
            repetita_args = util.get_repetita_args(args, t)
            print('command:', ' '.join(repetita_args))
            stdout = util.call(repetita_args)
            if stdout:
                print('stdout:', stdout)
                mlu, rc = util.parse_result(t, stdout, args)
                if len(mlu) == args.T:
                    mlus.append(mlu)
                if rc is not None:
                    rcs.append(rc)

    util.save(mlus, rcs, args)
Example #3
def create_reference_table():
    words = util.load(words_path)
    times = np.array(util.load(data_path))
    pos = util.load(pos_path)

    table = collections.defaultdict(dict)
    count = collections.defaultdict(dict)
    for w, t, p in zip(words, times, pos):
        if w == "":
            continue
        l = len(w)
        if p not in table or l not in table[p]:
            table[p][l] = t
            count[p][l] = 1
        else:
            table[p][l] += t
            count[p][l] += 1

    for p in table:
        for l in table[p]:
            table[p][l] = table[p][l] / float(count[p][l])

    print(table)

    util.save(table, 'pos_len_table')
Example #4
    def send_var_fs(self, var_name, val):
        fname = ut.make_temp_nfs('.pk')
        ut.save(fname, val)
        res = self.view().execute('import util as ut; ut.wait_for_file("%s", 30); %s = ut.load("%s")' % (fname, var_name, fname))
        res.wait()
        assert res.successful()
        os.remove(fname)
Example #5
    def run(self):
        # Counter used to flush generated passwords to the output file in batches of 99999
        i = 0

        # Generate password list by itertools
        passwords = itertools.product(self.chars, repeat=self.passLen)

        outputList = []
        for password in passwords:

            # Flush the batch to the output file once the counter reaches 99999
            if i == 99999:
                save(tup=outputList, outFile=self.outputFile)
                i = 0

                # Clear the list so memory usage stays bounded
                outputList.clear()

            # Convert tuple to string
            it = ''.join(password)

            print(it)
            i += 1

            # Append the generated password to list
            outputList.append(it)

        # Save the remaining passwords still in memory that were not written out yet
        save(outputList, self.outputFile)
Example #6
def plot(results_path, fig_path):
    data = pd.read_csv(
        results_path.joinpath("trial_accuracy_means.csv"))\
        .groupby('model').get_group('exp')\
        .sort('trial')

    trials = data['trial']
    acc = data['median'] * 100

    fig, ax = plt.subplots()
    ax.plot(trials, acc, 'k.')
    ax.set_xlim(1, 200)
    ax.set_ylim(70, 100)
    ax.set_xlabel("Trial", fontsize=14)
    ax.set_ylabel("Percent correct", fontsize=14)
    util.clear_right(ax)
    util.clear_top(ax)
    util.outward_ticks(ax)
    fig.set_figheight(3.5)
    fig.set_figwidth(4.5)
    plt.draw()
    plt.tight_layout()

    pths = [fig_path.joinpath("trial_accuracy.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #7
    def save_model(self, table_name="q_learner_tables.pkl"):
        """
        Save the trained Q-learner, i.e. the Q table (and the T and R tables if Dyna is included).
        :param table_name: saved table name
        """
        tables = [self.q_table, self.t_table, self.r_table]
        save(tables, table_name)
Example #8
def plot(dest, results_path, query):

    # load human mass responses
    human_mass = pd\
        .read_csv(os.path.join(results_path, "human_mass_responses_by_stimulus.csv"))\
        .groupby('version')\
        .get_group('H')\
        .set_index(['stimulus', 'kappa0'])\
        .sortlevel()\
        .unstack('kappa0')\
        .reorder_levels([1, 0], axis=1) * 100

    # load model mass responses
    model_mass = pd\
        .read_csv(os.path.join(results_path, "model_mass_responses_by_stimulus.csv"))\
        .set_index(['likelihood', 'counterfactual', 'stimulus', 'kappa0'])\
        .sortlevel()\
        .unstack('kappa0')\
        .reorder_levels([1, 0], axis=1)

    # load mass correlations
    mass_corrs = pd\
        .read_csv(os.path.join(results_path, "mass_responses_by_stimulus_corrs.csv"))\
        .set_index(['version', 'likelihood', 'counterfactual'])\
        .sortlevel()\
        .ix['H']

    # color config
    plot_config = util.load_config()["plots"]
    #palette = plot_config["colors"]
    #colors = [palette[0], palette[2]]
    colors = ['.4', '.1']
    markers = ['o', 's']
    lightgrey = plot_config["lightgrey"]
    sns.set_style("white", {'axes.edgecolor': lightgrey})

    # create the figure
    fig, ax = plt.subplots()

    plot_kappas(ax, model_mass.ix[('empirical', True)], human_mass, colors, markers)
    ax.set_xlabel(r"Empirical observer, $p(\kappa=10|F_t,S_t)$")
    ax.set_ylabel(r"% participants choosing $\kappa=10$")
    format_mass_plot(ax, lightgrey)
    add_corr(ax, mass_corrs.ix[('empirical', True)])

    # create the legend
    make_legend(ax, colors, markers)

    # clear the top and right axis lines
    sns.despine()

    # set figure size
    fig.set_figheight(3)
    fig.set_figwidth(3.5)
    plt.draw()
    plt.tight_layout()

    # save
    for pth in dest:
        util.save(pth, close=False)
Example #9
def codeProject(args,flag,data):
  PARAM_KEY = 1
  PARAM_PATH = 2
  PARAM_FORMATTER = 3
  ARGUMENTS = len(args)-1

  # JSON mapping files and storage of this
  if( keyExists("projects",args[1])):
    if( "stdout" in args[2]):
      project = json.loads(load("projects/"+args[PARAM_KEY])); # Uses key value storage
      directory = args[PARAM_PATH] + "/" + args[PARAM_KEY]
      
      mkdir(directory)
      for x in project.keys(): # Reflect that with here
        _file = json.loads(load("files/"+x));
        out = '';
        for y in _file:
          block = str(load("blocks/"+ y))
          if(ARGUMENTS == PARAM_FORMATTER): # Alter all the blocks in said fashion
            block = format.block(block, args[PARAM_FORMATTER])     
          out += block
        # Output the file with the correct file name
        save(directory + "/" + project[x],out)

  else:
    error("Error: Project does not exist")
def store(frame_list, tube_bbox, video_id, utterance, person_id, start, end,
          video_count, chunk_start, args):
    out, final_bbox = crop_bbox_from_frames(frame_list,
                                            tube_bbox,
                                            min_frames=args.min_frames,
                                            image_shape=args.image_shape,
                                            min_size=args.min_size,
                                            increase_area=args.increase)
    if out is None:
        return []

    start += round(chunk_start * REF_FPS)
    end += round(chunk_start * REF_FPS)
    name = (person_id + "#" + video_id + "#" + utterance + '#' +
            str(video_count).zfill(3) + ".mp4")
    partition = 'test' if person_id in TEST_PERSONS else 'train'
    save(os.path.join(args.out_folder, partition, name), out, args.format)
    return [{
        'bbox': '-'.join(map(str, final_bbox)),
        'start': start,
        'end': end,
        'fps': REF_FPS,
        'video_id': '#'.join([video_id, person_id]),
        'height': frame_list[0].shape[0],
        'width': frame_list[0].shape[1],
        'partition': partition
    }]
Example #11
def vbow(input_file, output_folder):
    img, img_path = load_images_from_file(
        input_file)  # take all images category by category
    size = [5, 50, 100, 250, 500, 750, 1000]
    kp = extract_kp(img)
    #sift = sift_features(img, kp)
    gist = gist_features(img, kp)
    descriptors = [gist]
    #descriptors = [gist, sift]

    for d in descriptors:
        name = d[0]
        desc_list = d[1]  # the (unordered) descriptor list
        cat_list = d[2]  # the features separated class by class for the training data
        create_dir(output_folder)
        create_dir(f"{output_folder}/{name}/")
        for s in size:
            create_dir(f"{output_folder}/{name}/{s}/")
        print(f"--- Create VBoW for {name} features. ---")
        for s in size:
            # the cluster centers serve as the visual words
            vbow = generate_vbow(s, desc_list, cat_list, 100, 10)
            save(vbow, img_path, f"{output_folder}/{name}/{s}/")
Example #12
    def accept(self):
        """ OK """
        args = str(self.ui.txtArgs.toPlainText()).replace("\n", " ")
        self.arglist = args.split(' ')
        self._working_dir = str(self.ui.txtWorkDir.text())
        util.save(args, self._working_dir)
        QDialog.accept(self)
Example #13
        def closure():
            optimizer.zero_grad()
            vgg_model(input_image)
            style_score = 0
            content_score = 0

            for s in style_losses:
                style_score += s.loss
            for c in content_losses:
                content_score += c.loss

            style_score *= style_weight
            content_score *= content_weight

            loss = style_score + content_score
            loss.backward()

            # the image may end up with pixels outside [0, 1]; this clamps them back
            input_image.data.clamp_(0, 1)
            i[0] += 1

            if i[0] % 50 == 0:
                print("iteraciones: {}:".format(i[0]))
                print(
                    'Diferencia de estilo : {:4f} Diferencia de contenido: {:4f}'
                    .format(style_score.item(), content_score.item()))
                if path != "":
                    save(input_image, path.format(i[0]))
                print()

            return style_score + content_score
Example #14
def plot(results_path, fig_path):
    data = pd.read_csv(
        results_path.joinpath("trial_time_means.csv"))\
        .groupby('model').get_group('exp')\
        .sort_values(by='trial')

    trials = data['trial']
    times = data['median']

    fig, ax = plt.subplots()
    ax.plot(trials, times, 'k.')
    ax.set_xlim(1, 200)
    ax.set_xlabel("Trial", fontsize=14)
    ax.set_ylabel("Response time", fontsize=14)
    util.clear_right(ax)
    util.clear_top(ax)
    util.outward_ticks(ax)
    fig.set_figheight(3.5)
    fig.set_figwidth(4.5)
    plt.draw()
    plt.tight_layout()

    pths = [fig_path.joinpath("trial_time.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #15
def codeFile(args,flag,data): 
  PARAM_KEY = 1;
  PARAM_FILE = 2; # Output file location
  PARAM_FORMATTER = 3
  ARGUMENTS = len(args)-1
  # Ability to add a block of code through copy and paste and have it formatted correctly!
  if( keyExists("files",args[PARAM_KEY])):
    _file = json.loads(load("files/"+args[PARAM_KEY]));
    out = ''

    # loadJSON 
    for x in _file:
      block = str(load("blocks/"+ x))
      if(ARGUMENTS == PARAM_FORMATTER): # Alter all the blocks in said fashion
        block = format.block(block, args[PARAM_FORMATTER])     
      out += block
      out += "\n" # Adds some spacing between blocks

    # No file specified
    if(len(args) < 3 ): 
      log(out)
    else:
      log("Saving to file "+ args[PARAM_FILE] )
      save(args[PARAM_FILE],out)
  else:
    error("Error: File does not exist")
def load_dataset(fname, nb_lines):
    """Load the Amazon dataset if not already present on disc"""
    import os.path
    if os.path.isfile('safe/Amazon-'+str(nb_lines)+'.p'):
        return util.load('safe/Amazon-'+str(nb_lines)+'.p')
    count = 1
    X = []
    y = []
    with open(fname) as f:
        for line in f:
            text, label = read_line(line)
            #print((label, text))
            X.append(text)
            y.append(label)
            if count >= nb_lines:
                break
            count+=1

    # load pretrained dictionary
    dico = util.load('safe/vocab_gensim.p')
    preprocessor = text_preprocessing.Preprocessor(dico=dico)
    X = preprocessor.preprocess(X)
    # save the loaded dataset as a pickle to speed up the next run
    util.save((X,y), 'safe/Amazon-'+str(nb_lines)+'.p')
    return (X, y)
Example #17
def run_style_transfer(content_image,
                       style_image,
                       input_image,
                       num_steps=300,
                       style_weight=1000000,
                       content_weight=1,
                       path=""):

    vgg_model, style_losses, content_losses = get_style_model_and_losses(
        style_image,
        content_image,
        style_layers=style_layers,
        content_layers=content_layers)
    optimizer = get_input_optimizer(input_image)

    i = [0]
    input_image.data.clamp_(0, 1)

    if path != "":
        path += "\\{}.jpg"
        save(input_image, path.format(i[0]))

    while i[0] <= num_steps:

        def closure():
            optimizer.zero_grad()
            vgg_model(input_image)
            style_score = 0
            content_score = 0

            for s in style_losses:
                style_score += s.loss
            for c in content_losses:
                content_score += c.loss

            style_score *= style_weight
            content_score *= content_weight

            loss = style_score + content_score
            loss.backward()

            # the image may end up with pixels outside [0, 1]; this clamps them back
            input_image.data.clamp_(0, 1)
            i[0] += 1

            if i[0] % 50 == 0:
                print("iteraciones: {}:".format(i[0]))
                print(
                    'Diferencia de estilo : {:4f} Diferencia de contenido: {:4f}'
                    .format(style_score.item(), content_score.item()))
                if path != "":
                    save(input_image, path.format(i[0]))
                print()

            return style_score + content_score

        optimizer.step(closure)

    return input_image
Example #18
def plot(dest, results_path, counterfactual, likelihood):
    if likelihood == 'ipe':
        likelihood = 'ipe_' + util.get_query()

    # load in the model data, which includes fitted parameters
    data = pd\
        .read_csv(os.path.join(results_path, 'model_belief_by_trial_fit.csv'))\
        .groupby(['fitted', 'counterfactual', 'likelihood'])\
        .get_group((True, counterfactual, likelihood))\
        .drop_duplicates(['version', 'model', 'pid', 'B'])\
        .set_index(['version', 'model', 'pid'])\
        .sortlevel()

    # double check that there is exactly one parameter for each pid
    assert data.index.is_unique

    # plotting config stuff
    plot_config = util.load_config()["plots"]
    all_colors = plot_config["colors"]
    colors = {'static': all_colors[1], 'learning': all_colors[0]}

    # create the figure and plot the histograms
    fig, axes = plt.subplots(2, 3, sharex=True)
    for i, version in enumerate(['H', 'G', 'I']):
        for j, model in enumerate(['static', 'learning']):
            hist(axes[j, i], data.ix[(version, model)]['B'],
                 plot_config['darkgrey'])

    # set titles and axis labels
    for i in range(3):
        axes[0, i].set_title('Experiment {}'.format(i + 1), y=1.05)
    for ax in axes[:, 0]:
        ax.set_ylabel("% participants")
    for ax in axes[1]:
        ax.set_xlabel(r"Best fit learning rate ($\beta$)")

    for i, label in enumerate(['Static', 'Learning']):
        mid = sum(axes[i, 0].get_ylim()) / 2.0
        axes[i, 0].text(
            -2.6,
            mid,
            label,
            rotation=90,
            fontsize=12,  # same as title font size
            verticalalignment='center')

    # clear top and right axis lines
    sns.despine()

    # set figure size
    fig.set_figwidth(6.5)
    fig.set_figheight(3.25)
    plt.draw()
    plt.tight_layout()
    plt.subplots_adjust(left=0.14)

    # save
    for pth in dest:
        util.save(pth, close=False)
Example #19
def saveTest():
    data = [0,1,2,3,4]
    name = "test.pkl"
    util.save(data,name)
    data2 = util.load(name)

    print(data, data2)
    return data == data2
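Example #19 exercises the round-trip contract that most snippets on this page rely on: util.save(obj, name) followed by util.load(name) returns an equal object. The actual util modules differ from project to project; a minimal pickle-based sketch that would satisfy this test (a hypothetical stand-in, not any repo's real implementation):

import pickle

def save(data, name):
    # Serialize an arbitrary Python object to the given path (hypothetical util.save).
    with open(name, 'wb') as fh:
        pickle.dump(data, fh)

def load(name):
    # Read back an object written by save (hypothetical util.load).
    with open(name, 'rb') as fh:
        return pickle.load(fh)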
Example #20
def train(args, params):

    print("Loading training set...")
    train = load.load_dataset(params['train'])
    print("Loading dev set...")
    dev = load.load_dataset(params['dev'])
    print("Building preprocessor...")
    preproc = load.Preproc(*train)
    print("train_set_classes:", preproc.classes)
    print("Training size: " + str(len(train[0])) + " examples.")
    print("Dev size: " + str(len(dev[0])) + " examples.")

    save_dir = make_save_dir(params['save_dir'], args.experiment)

    util.save(preproc, save_dir)

    params.update({
        "input_shape": [None, 1],
        "num_categories": len(preproc.classes)
    })

    model = network.build_network(**params)

    stopping = keras.callbacks.EarlyStopping(patience=30)

    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        factor=0.1, patience=2, min_lr=params["learning_rate"] * 0.001)

    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath=get_filename_for_saving(save_dir), save_best_only=False)

    batch_size = params.get("batch_size", 32)

    # summary = str(model.summary(print_fn=lambda x: fh.write(x + '\n')))
    # out = open("/content/ecg/report.txt",'w')
    # out.write(summary)
    # out.close

    if params.get("generator", False):
        train_gen = load.data_generator(batch_size, preproc, *train)
        dev_gen = load.data_generator(batch_size, preproc, *dev)
        model.fit_generator(train_gen,
                            steps_per_epoch=int(len(train[0]) / batch_size),
                            epochs=MAX_EPOCHS,
                            validation_data=dev_gen,
                            validation_steps=int(len(dev[0]) / batch_size),
                            callbacks=[checkpointer, reduce_lr, stopping])
        # util.learning_curve(history)

    else:
        train_x, train_y = preproc.process(*train)
        dev_x, dev_y = preproc.process(*dev)
        model.fit(train_x,
                  train_y,
                  batch_size=batch_size,
                  epochs=MAX_EPOCHS,
                  validation_data=(dev_x, dev_y),
                  callbacks=[checkpointer, reduce_lr, stopping])
Example #21
def plot(results_path, fig_path):
    order = ['exp', 'oc', 'th', 'hc', 'bq', 'bqp']
    titles = {
        'exp': "Human",
        'oc': "Oracle",
        'th': "Threshold",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    pth = results_path.joinpath("all_response_times.pkl")
    with open(pth, "r") as fh:
        times = pickle.load(fh)

    fig, axes = plt.subplots(1, len(order), sharey=True)
    for i, key in enumerate(order):
        ax = axes[i]
        bins = 200

        if key == 'exp':
            hist, edges = np.histogram(
                times[key] / 1000., bins=bins,
                range=(0, 20))
        else:
            hist, edges = np.histogram(
                times[key], bins=bins,
                range=(0, bins))

        edges = edges[:101]
        hist = hist[:100]
        hist = hist * 100 / float(len(times[key]))
        width = edges[1] - edges[0]
        ax.bar(edges[:-1], hist, width=width, color='k')

        ax.set_xlim(0, edges[-1])
        ax.set_title(titles[key], fontsize=14)
        util.clear_right(ax)
        util.clear_top(ax)
        util.outward_ticks(ax)

        if key == 'exp':
            ax.set_xlabel("RT (seconds)", fontsize=14)
        else:
            ax.set_xlabel("Number of actions", fontsize=14)
    axes[0].set_ylabel("Percent", fontsize=14)

    fig.set_figheight(2.5)
    fig.set_figwidth(16)

    plt.draw()
    plt.tight_layout()

    pths = [fig_path.joinpath("response_time_histograms.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #22
def plot(results_path, fig_path):
    order = ['exp', 'oc', 'th', 'hc', 'bq', 'bqp']
    titles = {
        'exp': "Human",
        'oc': "Oracle",
        'th': "Threshold",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    pth = results_path.joinpath("all_response_times.pkl")
    with open(pth, "r") as fh:
        times = pickle.load(fh)

    fig, axes = plt.subplots(2, 3, sharey=True)
    for i, key in enumerate(order):
        ax = axes.flat[i]
        bins = 200

        if key == 'exp':
            hist, edges = np.histogram(
                times[key] / 1000., bins=bins,
                range=(0, 20))
        else:
            hist, edges = np.histogram(
                times[key], bins=bins,
                range=(0, bins))

        edges = edges[:101]
        hist = hist[:100]
        hist = hist * 100 / float(len(times[key]))
        ax.fill_between(edges[:-1], hist, np.zeros_like(hist), color='#666666')

        ax.set_xlim(0, edges[-1])
        ax.set_title(titles[key])

        if key == 'exp':
            ax.set_xlabel("RT (seconds)")
        else:
            ax.set_xlabel("Number of actions")

    axes[0, 0].set_ylabel("Percent")
    axes[1, 0].set_ylabel("Percent")

    sns.despine()

    fig.set_figheight(4)
    fig.set_figwidth(6)
    plt.tight_layout()
    plt.subplots_adjust(left=0.1)

    pths = [fig_path.joinpath("response_time_histograms.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #23
def downloadYear(year):
    yearString = str(year)

    examineUrl = '/year/' + yearString
    limit = -1

    navUrls = {}
    pageUrls = {}

    navUrls[examineUrl] = False

    newNavExamined = True
    count = 0
    while (newNavExamined and (limit == -1 or count < limit)):
        newNavExamined = False
        for url, examined in navUrls.iteritems():
            if not examined:
                time.sleep(2)
                try:
                    newPages, newNavs = scanForUrls(url)
                except:
                    print 'Exception occurred: ' + str(sys.exc_info()[0])
                    break

                for page in newPages:
                    if not page in pageUrls:
                        print 'Adding page ' + page
                        pageUrls[page] = False

                for nav in newNavs:
                    if not nav in navUrls:
                        print 'Adding nav ' + nav
                        navUrls[nav] = False

                navUrls[url] = True
                newNavExamined = True
                break

        count += 1

    print 'Examined ' + str(count) + ' nav pages'

    entries = []

    for path in pageUrls:
        time.sleep(2)
        try:
            entry = parsePage(path)
        except:
            print 'Exception occurred: ' + str(sys.exc_info()[0])
            break

        entries.append(entry)

    save(entries, 'data/' + yearString + '.json')

    print 'Downloaded ' + str(len(entries)) + ' applications'
Example #24
def run(video_id, args):
    if not os.path.exists(
            os.path.join(args.video_folder,
                         video_id.split('#')[0] + '.mp4')):
        download(video_id.split('#')[0], args)

    if not os.path.exists(
            os.path.join(args.video_folder,
                         video_id.split('#')[0] + '.mp4')):
        print('Can not load video %s, broken link' % video_id.split('#')[0])
        return
    reader = imageio.get_reader(
        os.path.join(args.video_folder,
                     video_id.split('#')[0] + '.mp4'))
    fps = reader.get_meta_data()['fps']

    df = pd.read_csv(args.metadata)
    df = df[df['video_id'] == video_id]

    all_chunks_dict = [{
        'start': df['start'].iloc[j],
        'end': df['end'].iloc[j],
        'bbox': list(map(int, df['bbox'].iloc[j].split('-'))),
        'frames': []
    } for j in range(df.shape[0])]
    ref_fps = df['fps'].iloc[0]
    ref_height = df['height'].iloc[0]
    ref_width = df['width'].iloc[0]
    partition = df['partition'].iloc[0]
    try:
        for i, frame in enumerate(reader):
            for entry in all_chunks_dict:
                if (i * ref_fps >= entry['start'] * fps) and (
                        i * ref_fps < entry['end'] * fps):
                    left, top, right, bot = entry['bbox']
                    left = int(left / (ref_width / frame.shape[1]))
                    top = int(top / (ref_height / frame.shape[0]))
                    right = int(right / (ref_width / frame.shape[1]))
                    bot = int(bot / (ref_height / frame.shape[0]))
                    crop = frame[top:bot, left:right]
                    if args.image_shape is not None:
                        crop = img_as_ubyte(
                            resize(crop, args.image_shape, anti_aliasing=True))
                    entry['frames'].append(crop)
    except imageio.core.format.CannotReadFrameError:
        pass  # stop reading when a frame cannot be decoded

    for entry in all_chunks_dict:
        if 'person_id' in df:
            first_part = df['person_id'].iloc[0] + "#"
        else:
            first_part = ""
        first_part = first_part + '#'.join(video_id.split('#')[::-1])
        path = first_part + '#' + str(entry['start']).zfill(6) + '#' + str(
            entry['end']).zfill(6) + '.mp4'
        save(os.path.join(args.out_folder, partition, path), entry['frames'],
             args.format)
Example #25
def getDataAndLabels():
    data = read(DATA_PICKLE)
    labels = read(LABELS_PICKLE)
    if data is None or labels is None:
        data = np.load(DATA_FILE)
        labels = np.load(LABELS_FILE)
        save(data, DATA_PICKLE)
        save(labels, LABELS_PICKLE)
    labels = transformLabels(labels)
    return [data, labels]
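The caching pattern in Example #25 assumes a read helper that returns None when the pickle cache is absent, so the expensive np.load path runs only once. A plausible sketch of such a helper (hypothetical, under the same pickle-based assumptions as the sketch after Example #19):

import os
import pickle

def read(path):
    # Return the unpickled object at path, or None if no cache exists yet
    # (hypothetical helper matching the read/save caching pattern above).
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as fh:
        return pickle.load(fh)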
Example #26
def train(args, params):

    print("Loading training set...")
    train = load.load_dataset(params['train'])
    print("Loading dev set...")
    dev = load.load_dataset(params['dev'])
    print("Building preprocessor...")
    preproc = load.Preproc(*train)
    print("Training size: " + str(len(train[0])) + " examples.")
    print("Dev size: " + str(len(dev[0])) + " examples.")


    save_dir = make_save_dir(params['save_dir'], args.experiment)

    util.save(preproc, save_dir)

    params.update({
        "input_shape": [None, 1],
        "num_categories": len(preproc.classes)
    })

    model = network.build_network(**params)

    stopping = keras.callbacks.EarlyStopping(patience=8)

    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        factor=0.1,
        patience=2,
        min_lr=params["learning_rate"] * 0.001)

    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath=get_filename_for_saving(save_dir),
        save_best_only=False)

    batch_size = params.get("batch_size", 32)

    if params.get("generator", False):
        train_gen = load.data_generator(batch_size, preproc, *train)
        dev_gen = load.data_generator(batch_size, preproc, *dev)
        model.fit_generator(
            train_gen,
            steps_per_epoch=int(len(train[0]) / batch_size),
            epochs=MAX_EPOCHS,
            validation_data=dev_gen,
            validation_steps=int(len(dev[0]) / batch_size),
            callbacks=[checkpointer, reduce_lr, stopping])
    else:
        train_x, train_y = preproc.process(*train)
        dev_x, dev_y = preproc.process(*dev)
        model.fit(
            train_x, train_y,
            batch_size=batch_size,
            epochs=MAX_EPOCHS,
            validation_data=(dev_x, dev_y),
            callbacks=[checkpointer, reduce_lr, stopping])
Example #27
def train(args, params):

    print("Loading training set...")
    train = load.load_dataset(params['train'])
    print("Loading dev set...")
    dev = load.load_dataset(params['dev'])
    print("Building preprocessor...")
    preproc = load.Preproc(*train)
    print("Training size: " + str(len(train[0])) + " examples.")
    print("Dev size: " + str(len(dev[0])) + " examples.")

    save_dir = make_save_dir(params['save_dir'], args.experiment)

    util.save(preproc, save_dir)

    params.update({
        "input_shape": [None, 1],
        "num_categories": len(preproc.classes)
    })
    print(params)

    model = network.build_network(**params)

    stopping = keras.callbacks.EarlyStopping(patience=8)

    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        factor=0.1, patience=2, min_lr=params["learning_rate"] * 0.001)

    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath=get_filename_for_saving(save_dir), save_best_only=False)
    ckpt_best = keras.callbacks.ModelCheckpoint(os.path.join(
        save_dir, 'best.hdf5'),
                                                save_best_only=True)

    batch_size = params.get("batch_size", 32)

    if params.get("generator", False):
        train_gen = load.data_generator(batch_size, preproc, *train)
        dev_gen = load.data_generator(batch_size, preproc, *dev)
        model.fit_generator(
            train_gen,
            steps_per_epoch=int(len(train[0]) / batch_size),
            epochs=MAX_EPOCHS,
            validation_data=dev_gen,
            validation_steps=int(len(dev[0]) / batch_size),
            callbacks=[checkpointer, ckpt_best, reduce_lr, stopping])
    else:
        train_x, train_y = preproc.process(*train)
        dev_x, dev_y = preproc.process(*dev)
        model.fit(train_x,
                  train_y,
                  batch_size=batch_size,
                  epochs=MAX_EPOCHS,
                  validation_data=(dev_x, dev_y),
                  callbacks=[checkpointer, ckpt_best, reduce_lr, stopping])
Example #28
def insertBlock(args,flag, data):
  if(flag == 1):
    log("Creating Block \"" + get("key") + "\"")
    save("blocks/"+get("key"),data)
    set("continued",0)
    printBlue("Block Creation Complete.")
  else:
    set("continued",1)
    set("prevCmd", "create")
    set("key",args[1])
    log("Enter Block Information and press Ctrl+D when done!")
Example #29
def save_model(model, model_name, mode, n_hiddens, act_fun, n_comps,
               batch_norm):

    assert is_data_loaded(), 'Dataset hasn\'t been loaded'

    savedir = root_output + data_name + '/'
    util.make_folder(savedir)
    filename = create_model_id(model_name, mode, n_hiddens, act_fun, n_comps,
                               batch_norm)

    util.save(model, savedir + filename + '.pkl')
Example #30
def self_model():
    feature_vocab = _build_feature_vocab(Option.src)
    from perm import TreeParamPermModel as PM
    pm = PM(feature_vocab, Option)
    logger.info('[Train] Loading the source language...:%s' % Option.src)
    Global.mx_dep = Option.mx_dep_train
    data_train = Data(Option.src, conll_reader, valid=lambda x: len(x) < Option.mx_len)
    logger.info('Training...')
    fit(pm, {'data': data_train, 'mx_sent': Option.mx_sent, 'batch_size': Option.batch_size},
        nb_epoch=Option.mx_epoch, callbacks=_callbacks(model=''))
    save(fn=Option.model, model={'model': pm.state_dict(), 'feature_vocab': feature_vocab})
Example #31
def getDataAndLabels():
    data = read(DATA_PATH)
    labels = read(LABELS_PATH)
    if data is None or labels is None:
        for dir in files:
            for index in files[dir]:
                [data, labels] = doStuff(dir, index, data, labels)
        save(data, DATA_PATH)
        save(labels, LABELS_PATH)
    labels = transformLabels(labels)
    return [data, labels]
Example #32
def build(root, dest):
    dest = os.path.join(dest, 'rbnf')
    shutil.rmtree(dest)
    makedirs(dest)
    path = os.path.join(root, 'common/rbnf')
    names = [os.path.splitext(n)[0] for n in os.listdir(path)]
    for name in names:
        tree = readxml(os.path.join(path, '%s.xml' % name))
        rbnf = convert(tree)
        rbnf = to_utf8(rbnf)
        out = os.path.join(dest, '%s.json' % name)
        save(out, rbnf)
Example #33
def handle_update():
    statuses = get_statuses()
    previous_statuses = get_latest_statuses(statuses.keys())
    diffs = find_diffs(previous_statuses, statuses)
    log.debug(f'Changes: {diffs}')

    if diffs:
        send_updates(diffs)

    time = get_time()
    for location, status in statuses.items():
        save(location, time, status)
Example #34
def run(config):

    opt_cfg = config["optimizer"]
    data_cfg = config["data"]

    # Model
    model = LinearModel(config=config)
    model.cuda() if use_cuda else model.cpu()

    # Loaders
    batch_size = opt_cfg["batch_size"]
    preproc = None #loader.Preprocessor(data_cfg["train_set"])
    train_ldr = loader.make_loader(data_cfg["train_set"],
                                   batch_size, model, config)
    dev_ldr = loader.make_loader(data_cfg["dev_set"],
                                  batch_size, model, config)

    # Optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                    lr=opt_cfg["learning_rate"],
                    momentum=opt_cfg["momentum"])

    run_state = (0, 0, 0)
    best_so_far = float("inf")
    for e in range(opt_cfg["epochs"]):
        start = time.time()

        run_state = run_epoch(model, optimizer, train_ldr, *run_state)

        msg = "Epoch {} completed in {:.2f} (s)."
        print(msg.format(e, time.time() - start))

        dev_loss = eval_loop(model, dev_ldr)
        print("Dev Loss: {:.5f}".format(dev_loss))

        tb.log_value("dev_loss", dev_loss, e)

        # # Log for tensorboard
        # tb.log_value("dev_loss", dev_loss, e)
        # tb.log_value("dev_map", dev_map, e)
        #
        # print("Dev Loss: {:.2f}".format(dev_loss))
        # print("Dev mAP: {:.2f}".format(dev_map))
        #
        # util.save(model, preproc, config["save_path"])
        #
        # # Save the best model by F1 score on the dev set
        # if dev_map > best_so_far:
        #     best_so_far = dev_map

        util.save(model, preproc,
                       config["save_path"], tag="best")
Example #35
    def save(self, file_name):
        params_dict = {}
        params_dict['lst_layer_names'] = [
            layer.name for layer in self._lst_layers
        ]
        params_dict['lst_layer_type'] = self._lst_layer_type
        params_dict['lst_num_hid'] = self._lst_num_hid
        params_dict['data_dim'] = self._data_dim

        for layer in self._lst_layers:
            layer.add_params_to_dict(params_dict)

        util.save(file_name, " ".join(params_dict.keys()), params_dict)
Example #36
def main():

    f = dict()
    f[1] = _make_1d_1d

    if len(sys.argv) == 1:
        lv = 1
    else:
        lv = int(sys.argv[1])

    eq = f[lv]()

    save('mathdrill.pdf', eq, '-')
Example #37
    def save_tasks(self, sender):
        """Save the tasks to the specified file."""

        task_file = self.save_dialog['txt_save_file'].text
        if task_file:
            if task_file.rfind('.tsk', len(task_file) - 4) == -1:
                task_file += '.tsk'
            self.save_dialog.close()
            if task_file == self.current_task_file:
                # some bug; even though the file should be closed, I can't overwrite it
                util.delete(task_file)
            util.save(self.tasklist.tasks, task_file)
        else:
            self.save_dialog['txt_save_file'].text = ''
Example #38
def gist(input_file, output_folder):

    img_dict, img_dict_path = load_images_from_file(
        input_file)  # take all images category by category

    gist = gist_img(img_dict)
    name = gist[0]
    desc_list = gist[1]  # the (unordered) descriptor list
    cat_dict = gist[2]  # the features separated class by class for the training data
    create_dir(output_folder)
    create_dir(f"{output_folder}/{name}/")
    save(cat_dict, img_dict_path, f"{output_folder}/{name}/")
    print(f"\nThe execution of gist is done!\n")
Example #39
    def save_tasks(self, sender):
        """Save the tasks to the specified file."""

        task_file = self.save_dialog["txt_save_file"].text
        if task_file:
            if task_file.rfind(".tsk", len(task_file) - 4) == -1:
                task_file += ".tsk"
            self.save_dialog.close()
            if task_file == self.current_task_file:
                # some bug; even though the file should be closed, I can't overwrite it
                util.delete(task_file)
            util.save(self.tasklist.tasks, task_file)
        else:
            self.save_dialog["txt_save_file"].text = ""
Example #40
    def save_tasks(self, sender):
        """Save the tasks to the specified file."""

        task_file = self.save_dialog['textfield1'].text
        if not task_file == '':
            if task_file.rfind('.tsk', len(task_file) - 4) == -1:
                task_file += '.tsk'
            self.save_dialog.close()
            if task_file == self.current_task_file:
                # some bug; even though the file should be closed, I can't overwrite it
                util.delete(task_file)
            util.save(self.tasklist.tasks, task_file)
        else:
            self.save_dialog['textfield1'].text = ''
Example #41
    def convert_dnss_model(self, new_model):
        dict = {
            'l1_vh': self.weights[1]['w'],
            'l1_hb': self.weights[1]['hb'],
            'l2_vh': self.weights[2]['w'],
            'l2_hb': self.weights[2]['hb'],
            'l3_vh': self.weights[3]['w'],
            'l3_hb': self.weights[3]['hb'],
            'l4_vh': self.weights[4]['w'],
            'l4_hb': self.weights[4]['hb'],
            'l5_vh': self.weights[5]['w'],
            'l5_hb': self.weights[5]['hb']
        }
        varlist = 'l1_vh l1_hb l2_vh l2_hb l3_vh l3_hb l4_vh l4_hb l5_vh l5_hb'
        util.save(new_model, varlist, dict)
Example #42
    def run(self, phase, state, checkpoint=False, **kwargs):

        if checkpoint:
            save(state, self.temp_path)

        for _ in range(self.max_trials):
            try:
                phase(state, self, **kwargs)
            except GameStateError as e:
                if checkpoint:
                    state = load(self.temp_path)
            except Exception as e:
                print('Error in this phase!')
                raise e
            else:
                break
Example #43
def insertFile(args,flag,data):
  # PARAM_CONSTRUCT = 2
  PARAM_KEY = 1
  PARAM_BLOCKS_BASE = 2
  data = []
  files = os.listdir("blocks")

  for x in args[PARAM_BLOCKS_BASE:]:
    if("unset" not in get("tag")): # Tagging system works!
      x = get("tag") + "." + x
    if x in files:
      data.append(x)
    else :
      print "Error: Invalid Block"

  save("files/" + args[PARAM_KEY], json.dumps(data))  
def plot(results_path, fig_path):
    order = ['hc', 'bq', 'bqp']
    titles = {
        'exp': "Human",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    results = pd.read_csv(
        results_path.joinpath("accuracy_means.csv"))\
        .set_index(['stimulus', 'theta', 'flipped', 'model'])['median']\
        .unstack(['model', 'flipped']) * 100

    fig, axes = plt.subplots(1, len(order), sharey=True, sharex=True)

    for i, key in enumerate(order):
        ax = axes[i]
        for flipped in ['same', 'flipped']:
            ax.plot(
                results[(key, flipped)],
                results[('exp', flipped)],
                '.', alpha=0.8, label=flipped)

        ax.set_xlabel("Model accuracy", fontsize=14)
        ax.set_title(titles[key], fontsize=14)
        util.clear_right(ax)
        util.clear_top(ax)
        util.outward_ticks(ax)

    axes[0].set_ylabel("Human accuracy", fontsize=14)
    axes[0].set_xlim(-5, 105)
    axes[0].set_ylim(45, 105)
    axes[0].legend(loc=0, numpoints=1)

    fig.set_figheight(3)
    fig.set_figwidth(8)
    plt.draw()
    plt.tight_layout()

    pths = [fig_path.joinpath("accuracy_scatters.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #45
def plot_key(key, results_path, fig_path):
    fig, axes = plt.subplots(4, 5, sharey=True, sharex=True)

    means = pd.read_csv(
        results_path.joinpath("theta_time_stimulus.csv"))\
        .set_index(['stimulus', 'flipped', 'model'])\
        .groupby(level='model').get_group(key)

    for i, (stim, sdf) in enumerate(means.groupby(level='stimulus')):
        ax = axes.flat[i]

        for flipped, stats in sdf.groupby(level='flipped'):
            lower = stats['median'] - stats['lower']
            upper = stats['upper'] - stats['median']
            ax.errorbar(
                stats['modtheta'], stats['median'],
                yerr=[lower, upper],
                label=flipped, lw=3)

        ax.set_xlabel("Rotation")
        ax.set_xticks([0, 60, 120, 180])
        ax.set_xlim(-10, 190)
        util.clear_right(ax)
        util.clear_top(ax)
        util.outward_ticks(ax)
        ax.set_title("Stim %s" % stim)

    fig.set_figheight(8)
    fig.set_figwidth(10)

    plt.draw()
    plt.tight_layout()

    pths = [fig_path.joinpath("response_time_stimuli_%s.%s" % (key, ext))
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #46
def codeBlock(args,flag,data):
  PARAM_KEY = 1
  PARAM_FORMATTER = 2
  PARAM_FILE = 3
  PARAM_LINE = 4
  ARGUMENTS = len(args)-1

  # Ability to add a block of code through copy and paste and have it formatted correctly!
  if( keyExists("blocks",args[PARAM_KEY])):
    block = load("blocks/"+args[PARAM_KEY])
    # Format these blocks
    if(ARGUMENTS == PARAM_FORMATTER): # Format blocks
      block = format.block(block, args[PARAM_FORMATTER])
    if(ARGUMENTS <= PARAM_FORMATTER): # No file specified
      log(block)
    else:
      if(ARGUMENTS == PARAM_FILE): 
        log("Saving to file "+ args[PARAM_FILE] )
        save(args[PARAM_FILE],block)
      elif(ARGUMENTS >= PARAM_LINE): # Argument for line 
        save(args[PARAM_FILE],block,args[PARAM_LINE])
  else:
    error("Error: Block does not exist")
Example #47
def insertModule (args,flag,data):
  # Creates a project based on file jsons on the layout of the code block
  # PARAM_CONSTRUCT = 2
  log("Creating module!")
  PARAM_KEY = 1
  PARAM_NAME_DIR = 2
  # key : value
  PARAM_FILES_BASE = 3 # Use this as key value stores, as we don't have a specific domain and naming
  data = [] 
  files = os.listdir("files")
  ARGUMENTS = len(args[PARAM_FILES_BASE:]) - 1
  print ARGUMENTS

  # Another method of constructing a project
  for x in args[PARAM_FILES_BASE:]:
    # Split the arguments here
    s = x.split(":") # Which is a key value store
    # We assume the first argument is the key value
    if s[0] in files:
      data.append(s)
    else :
      print "Error: Invalid File"
 
  save("projects/" + args[PARAM_KEY], json.dumps(data))
def plot(results_path, fig_path):
    order = ['exp', 'oc', 'th', 'hc', 'bq', 'bqp']
    titles = {
        'exp': "Human",
        'oc': "Oracle",
        'th': "Threshold",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    colors = {
        'same': [ 0.16339869,  0.4449827 ,  0.69750096],
        'flipped': [ 0.72848904,  0.1550173 ,  0.19738562]
    }

    time_results = pd.read_csv(
        results_path.joinpath("theta_time.csv"))
    acc_results = pd.read_csv(
        results_path.joinpath("theta_accuracy.csv"))

    fig, axes = plt.subplots(2, len(order), sharex=True)
    for i, key in enumerate(order):
        ax = axes[0, i]
        df = time_results.groupby('model').get_group(key)

        for flipped, stats in df.groupby('flipped'):
            if key == 'exp':
                median = stats['median'] / 1000.
                lower = (stats['median'] - stats['lower']) / 1000.
                upper = (stats['upper'] - stats['median']) / 1000.
            else:
                median = stats['median']
                lower = stats['median'] - stats['lower']
                upper = stats['upper'] - stats['median']

            ax.errorbar(
                stats['modtheta'], median,
                yerr=[lower, upper], lw=3,
                color=colors[flipped],
                ecolor=colors[flipped])

        ax.set_xticks(np.arange(0, 200, 30))
        ax.set_xlim(-10, 190)

        ax.set_title(titles[key], fontsize=14)

        if key == 'exp':
            ax.set_ylabel("Response time", fontsize=14)
        else:
            ax.set_ylabel("# actions", fontsize=14)

    util.sync_ylims(axes[0, order.index('bq')], axes[0, order.index('bqp')])

    for i, key in enumerate(order):
        ax = axes[1, i]
        df = acc_results.groupby('model').get_group(key)

        for flipped, stats in df.groupby('flipped'):
            lower = stats['median'] - stats['lower']
            upper = stats['upper'] - stats['median']
            ax.errorbar(
                stats['modtheta'], stats['median'],
                yerr=[lower, upper], lw=3,
                color=colors[flipped],
                ecolor=colors[flipped])

        ax.set_xlim(-10, 190)
        ax.set_ylim(25, 105)
        ax.set_xticks(np.arange(0, 200, 30))
        ax.set_xlabel("Rotation", fontsize=14)

        ax.set_ylabel("Accuracy", fontsize=14)

    for ax in axes.flat:
        util.clear_right(ax)
        util.clear_top(ax)
        util.outward_ticks(ax)

    p0 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['same'],
        ec=colors['same'])
    p1 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['flipped'],
        ec=colors['flipped'])

    leg = axes[1, 1].legend(
        [p0, p1], ["\"same\" pairs", "\"flipped\" pairs"],
        numpoints=1, fontsize=12,
        loc='lower center',
        title='Stimuli')
    frame = leg.get_frame()
    frame.set_edgecolor('#FFFFFF')

    util.sync_ylabel_coords(axes.flat, -0.175)

    fig.set_figheight(4)
    fig.set_figwidth(18)

    plt.draw()
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.4)

    pths = [fig_path.joinpath("response_time_accuracy.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #49
    pl.xlabel('Relative Importance')

    return fig

if __name__ == '__main__':
    from util import show, save
    ###ROC###
    Y_true = [1, 1, 0, 0]
    pred1 = [0.9, 0.8, 0.6, 0.4]
    pred2 = [0.8, 0.3, 0.5, 0.4]

    plot_roc(Y_true, [pred1])

    labels = ["Good model", "Bad model"]

    save(plot_roc(Y_true, [pred1, pred2], labels=labels), "roc.png")

    ###TRAIN_VS_TEST###
    from sklearn import linear_model

    ###########################################################################
    # Generate sample data
    n_samples_train, n_samples_test, n_features = 75, 150, 500
    #np.random.seed(0)
    coef = np.random.randn(n_features)
    coef[50:] = 0.0  # only the first 50 features carry signal
    X = np.random.randn(n_samples_train + n_samples_test, n_features)
    y = np.dot(X, coef)

    # Split train and test data
    X_train, X_test = X[:n_samples_train], X[n_samples_train:]
Example #50
    data = np.empty((lenPatterns, numPatterns)).astype('i4')
    data[:, :-1] = addresses[:, 1:]
    data[:, -1] = addresses[:, -1]
        
    mem.writeM(addresses, data)

    # plot what we just wrote
    for i in xrange(numPatterns):
        plt.figure(50+curSeq)
        plt.subplot(1,numPatterns,i+1)
        plt.imshow((1-addresses[:,i]).reshape((figSize,figSize), order='F'),cmap='gray',interpolation='nearest')
        plt.xticks([],[])
        plt.yticks([],[])

    figName="storedSeq"+str(curSeq)
    save(figName, ext="png", close=True, verbose=True)


# now read sequentially with no noise used as input
#addresses = inputs.copy()
#for curPat in xrange(numPatterns):
#    a = addresses[:,curPat].copy()
#    d = mem.read(addresses[:,curPat]).reshape((figSize, figSize), order='F')
#    plt.figure(numPatterns+curPat)
#    plot_io(addresses[:,curPat].reshape((figSize, figSize), order='F'), d)



#
# read until converge, using retrieved data as the next address
#
Example #51
    except:
        print "config_local.py is not present loading configdef.py"
        from config import *  #default configuration

    if cfg.savedir == "":
        #cfg.savedir=InriaPosData(basepath=cfg.dbpath).getStorageDir() #where to save
        cfg.savedir = VOC07Data(basepath=cfg.dbpath).getStorageDir()

    import sys
    if len(sys.argv) > 1:
        cfg.cls = sys.argv[1]

    cfg.testname = cfg.testpath + cfg.cls + (
        "%d" % cfg.numcl) + "_" + cfg.testspec
    cfg.train = "keep2"
    util.save(cfg.testname + ".cfg", cfg)

    cfg.auxdir = cfg.savedir
    testname = cfg.testname

    if cfg.multipr == 1:
        numcore = None
    else:
        numcore = cfg.multipr

    mypool = Pool(numcore)

    stcfg = stats([{"name": "cfg*"}])
    stcfg.report(testname + ".rpt.txt", "w", "Initial Configuration")

    sts = stats([{
Example #52
	def save_tasks(self, task_file):
		"""Save the task file."""

		util.save(self.tasklist.tasks, task_file)
Example #53
def plot(results_path, fig_path):
    order = ['exp', 'th', 'bqp']
    titles = {
        'exp': "Human",
        'oc': "Oracle",
        'th': "Threshold",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    colors = {
        'same': [ 0.16339869,  0.4449827 ,  0.69750096],
        'flipped': [ 0.72848904,  0.1550173 ,  0.19738562]
    }

    means = pd.read_csv(
        results_path.joinpath("theta_time_stimulus.csv"))\
        .set_index(['stimulus', 'flipped', 'model'])\
        .groupby(level='stimulus').get_group(2)

    fig, axes = plt.subplots(1, len(order), sharex=True)
    for i, key in enumerate(order):
        ax = axes[i]
        df = means.groupby(level='model').get_group(key)

        for flipped, stats in df.groupby(level='flipped'):
            if key == 'exp':
                median = stats['median'] / 1000.
                lower = (stats['median'] - stats['lower']) / 1000.
                upper = (stats['upper'] - stats['median']) / 1000.
            else:
                median = stats['median']
                lower = stats['median'] - stats['lower']
                upper = stats['upper'] - stats['median']

            ax.errorbar(
                stats['modtheta'], median,
                yerr=[lower, upper], lw=3,
                color=colors[flipped],
                ecolor=colors[flipped])

        ax.set_xticks(np.arange(0, 200, 60))
        ax.set_xlim(-10, 190)
        ax.set_xlabel("Rotation")
        ax.set_title(titles[key])

        if key == "exp":
            ax.set_yticks([0.5, 1, 1.5, 2, 2.5, 3])
            ax.set_ylabel("Rotation")
        elif key == "th":
            ax.set_yticks([0, 10, 20, 30, 40, 50, 60])
            ax.set_ylabel("# Steps")
        elif key == "bqp":
            ax.set_yticks([10, 15, 20, 25])
            ax.set_ylabel("# Steps")

    p0 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['same'],
        ec=colors['same'])
    p1 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['flipped'],
        ec=colors['flipped'])

    leg = axes.flat[0].legend(
        [p0, p1], ["same", "flipped"],
        numpoints=1,
        loc='lower right',
        bbox_to_anchor=[1.1, 0])

    fig.set_size_inches(6, 2)
    sns.despine()
    plt.tight_layout()
    plt.subplots_adjust(left=0.1)

    pths = [fig_path.joinpath("response_time_stimulus.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #54
def plot(results_path, fig_path):
    order = ['exp', 'oc', 'th', 'hc', 'bq', 'bqp']
    titles = {
        'exp': "Human",
        'oc': "Oracle",
        'th': "Threshold",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    colors = {
        'same': [0.16339869, 0.4449827, 0.69750096],
        'flipped': [0.72848904, 0.1550173, 0.19738562]
    }

    acc_results = pd.read_csv(
        results_path.joinpath("theta_accuracy.csv"))

    fig, axes = plt.subplots(2, 3, sharex=False)
    for i, key in enumerate(order):
        ax = axes.flat[i]
        df = acc_results.groupby('model').get_group(key)

        for flipped, stats in df.groupby('flipped'):
            lower = stats['median'] - stats['lower']
            upper = stats['upper'] - stats['median']
            ax.errorbar(
                stats['modtheta'], stats['median'],
                yerr=[lower, upper], lw=3,
                color=colors[flipped],
                ecolor=colors[flipped])

        ax.set_xlim(-10, 190)
        ax.set_ylim(-5, 105)
        ax.set_xticks(np.arange(0, 200, 60))
        ax.set_xlabel("Rotation")
        ax.set_yticks([0, 50, 100])
        ax.set_yticklabels([0.0, 0.5, 1.0])
        ax.set_title(titles[key])
        ax.set_ylabel("Accuracy")

    p0 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['same'],
        ec=colors['same'])
    p1 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['flipped'],
        ec=colors['flipped'])

    leg = axes[0, 0].legend(
        [p0, p1], ["same", "flipped"],
        numpoints=1,
        loc='lower right',
        bbox_to_anchor=[1.1, 0])

    sns.despine()
    fig.set_size_inches(6, 4)
    plt.tight_layout()
    plt.subplots_adjust(left=0.1)

    pths = [fig_path.joinpath("accuracy.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #55
        # negative phase
        cm.dot(w_vh, h, target=v)
        v.add_col_vec(w_v)
        v.apply_sigmoid()

        cm.dot(w_vh.T, v, target=h)
        h.add_col_vec(w_h)
        h.apply_sigmoid()

        wu_vh.subtract_dot(v, h.T)
        wu_v.add_sums(v, axis=1, mult=-1.0)
        wu_h.add_sums(h, axis=1, mult=-1.0)

        # update weights
        w_vh.add_mult(wu_vh, epsilon / batch_size)
        w_v.add_mult(wu_v, epsilon / batch_size)
        w_h.add_mult(wu_h, epsilon / batch_size)

        # calculate reconstruction error
        v.subtract(v_true)
        err.append(v.euclid_norm() ** 2 / (num_vis * batch_size))

    print "Mean squared error: " + str(np.mean(err))
    print "Time: " + str(time.time() - start_time)

w_vh.copy_to_host()
util.save("weights.dat", "w_vh", {"w_vh": w_vh.numpy_array})

# cm.cublas_shutdown()
Example #56
        # negative phase
        cm.dot(w_vh, h, target = v)
        v.add_col_vec(w_v)
        v.apply_sigmoid()

        cm.dot(w_vh.T, v, target = h)
        h.add_col_vec(w_h)
        h.apply_sigmoid()

        wu_vh.subtract_dot(v, h.T)
        wu_v.add_sums(v, axis = 1, mult = -1.)
        wu_h.add_sums(h, axis = 1, mult = -1.)

        # update weights
        w_vh.add_mult(wu_vh, epsilon/batch_size)
        w_v.add_mult(wu_v, epsilon/batch_size)
        w_h.add_mult(wu_h, epsilon/batch_size)

        # calculate reconstruction error
        v.subtract(v_true)
        err.append(v.euclid_norm()**2/(num_vis*batch_size))

    print "Mean squared error: " + str(np.mean(err))
    print "Time: " + str(time.time() - start_time)

w_vh.copy_to_host()
util.save('weights.dat', 'w_vh', {'w_vh': w_vh.numpy_array})

cm.cublas_shutdown()
Example #57
    def save(self, filename):
        data = {}
        for childName, child in self.children.iteritems():
            data[childName] = child.doSave()
        util.save(filename, data)
Example #58
    rr.append(models[idm]["rho"])
    w1=numpy.concatenate((w1,waux[-1],-numpy.array([models[idm]["rho"]])/bias))
    sizereg[idm]=models[idm]["cost"].size
#w2=w #old w
w=w1

#add ids clsize and cumsize for each model
clsize=[]
cumsize=numpy.zeros(cfg.numcl+1,dtype=numpy.int)
for l in range(cfg.numcl):
    models[l]["id"]=l
    clsize.append(len(waux[l])+1)
    cumsize[l+1]=cumsize[l]+len(waux[l])+1
clsize=numpy.array(clsize)

util.save("%s%d.model"%(testname,0),models)
lg.info("Built first model")    

total=[]
posratio=[]
cache_full=False

#from scipy.ndimage import zoom
import detectCRF
from multiprocessing import Pool
import itertools

#just to compute totPosEx when using check points
arg=[]
for idl,l in enumerate(trPosImages):
    bb=l["bbox"]
Example #59
def plot(results_path, fig_path):
    order = ['exp', 'th', 'bqp']
    titles = {
        'exp': "Human",
        'oc': "Oracle",
        'th': "Threshold",
        'hc': "HC",
        'bq': "BQ (equal prior)",
        'bqp': "BQ (unequal prior)"
    }

    colors = {
        'same': 'r',
        'flipped': 'b'
    }

    means = pd.read_csv(
        results_path.joinpath("theta_time_stimulus.csv"))\
        .set_index(['stimulus', 'flipped', 'model'])\
        .groupby(level='stimulus').get_group(2)

    fig, axes = plt.subplots(1, len(order), sharex=True)
    for i, key in enumerate(order):
        ax = axes[i]
        df = means.groupby(level='model').get_group(key)

        for flipped, stats in df.groupby(level='flipped'):
            if key == 'exp':
                median = stats['median'] / 1000.
                lower = (stats['median'] - stats['lower']) / 1000.
                upper = (stats['upper'] - stats['median']) / 1000.
            else:
                median = stats['median']
                lower = stats['median'] - stats['lower']
                upper = stats['upper'] - stats['median']

            ax.errorbar(
                stats['modtheta'], median,
                yerr=[lower, upper], lw=3,
                color=colors[flipped],
                ecolor=colors[flipped])

        ax.set_xticks(np.arange(0, 200, 30))
        ax.set_xlim(-10, 190)
        ax.set_xlabel("Rotation")
        ax.set_title(titles[key], fontsize=14)

        if key == 'exp':
            ax.set_ylabel("Response time", fontsize=14)
        else:
            ax.set_ylabel("Number of actions", fontsize=14)

    # util.sync_ylims(axes[order.index('bq')], axes[order.index('bqp')])

    for ax in axes.flat:
        util.clear_right(ax)
        util.clear_top(ax)
        util.outward_ticks(ax)
        ax.set_axis_bgcolor('0.95')

    p0 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['same'],
        ec=colors['same'])
    p1 = plt.Rectangle(
        (0, 0), 1, 1,
        fc=colors['flipped'],
        ec=colors['flipped'])

    leg = axes[0].legend(
        [p0, p1], ["same", "flipped"],
        numpoints=1, fontsize=12,
        loc='lower right')
    frame = leg.get_frame()
    frame.set_edgecolor('#FFFFFF')

    util.sync_ylabel_coords(axes.flat, -0.175)

    fig.set_figheight(3)
    fig.set_figwidth(9)

    plt.draw()
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.4)

    pths = [fig_path.joinpath("response_time_stimulus.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths
Example #60
def runtest(models,tsImages,cfg,parallel=True,numcore=4,detfun=detectCRF.test,save=False,show=False,pool=None):

    #parallel=True
    #cfg.show=not(parallel)
    #numcore=4
    #mycfg=
    if parallel:
        if pool!=None:
            mypool=pool #use already created pool
        else:
            mypool = Pool(numcore)
    arg=[]

    for idl,l in enumerate(tsImages):
        #bb=l["bbox"]
        #for idb,b in enumerate(bb):
        arg.append({"idim":idl,"file":l["name"],"idbb":0,"bbox":[],"models":models,"cfg":cfg,"flip":False})    

    print "----------Test-----------"
    ltdet=[];
    if not(parallel):
        #itr=itertools.imap(detectCRF.test,arg)        
        #itr=itertools.imap(lambda x:detectCRF.test(x,numhyp=1),arg) #this can also be used       
        itr=itertools.imap(detfun,arg)
    else:
        #itr=mypool.map(detectCRF.test,arg)
        itr=mypool.imap(detfun,arg) #for parallel, a lambda does not work

    for ii,res in enumerate(itr):
        if show:
            im=myimread(arg[ii]["file"])
            if tsImages[ii]["bbox"]!=[]:
                detectCRF.visualize2(res[:3],cfg.N,im,bb=tsImages[ii]["bbox"][0])
            else:
                detectCRF.visualize2(res[:3],cfg.N,im)
            print [x["scr"] for x in res[:5]]
        ltdet+=res

    if parallel:
        if pool==None:
            mypool.close() 
            mypool.join() 

    #sort detections
    ltosort=[-x["scr"] for x in ltdet]
    lord=numpy.argsort(ltosort)
    aux=[]
    for l in lord:
        aux.append(ltdet[l])
    ltdet=aux

    #save on a file and evaluate with annotations
    detVOC=[]
    for l in ltdet:
        detVOC.append([l["idim"].split("/")[-1].split(".")[0],l["scr"],l["bbox"][1],l["bbox"][0],l["bbox"][3],l["bbox"][2]])

    #plot AP
    tp,fp,scr,tot=VOCpr.VOCprRecord(tsImages,detVOC,show=False,ovr=0.5)
    pylab.figure(15,figsize=(4,4))
    pylab.clf()
    rc,pr,ap=VOCpr.drawPrfast(tp,fp,tot)
    pylab.draw()
    pylab.show()
    #save in different formats
    if type(save)==str:
        testname=save
        util.savedetVOC(detVOC,testname+".txt")
        util.save(testname+".det",{"det":ltdet[:500]})#takes a lot of space use only first 500
        util.savemat(testname+".mat",{"tp":tp,"fp":fp,"scr":scr,"tot":tot,"rc":rc,"pr":pr,"ap":ap})
        pylab.savefig(testname+".png")
    return ap