def reduce_silence(path, audio_file, name_audio):
    cut_times = 0
    start_time = 0
    audios_with_silence = []
    new_audios = []
    end_time = 0
    silence_time = 0
    score_final = 0
    detect_speech(audio_file)
    with open("result.txt", "r") as text_file:
        for text_line in text_file:
            text_line = text_line.rstrip()
            items = text_line.split(" ")
            time_mil_1_final = get_time_file(items[1])
            time_mil_2_final = get_time_file(items[2])
            if score_final != 0:
                silence_time = time_mil_1_final - score_final
            if silence_time > 1.0:
                end_time = score_final
                new_audio_file = split_audios_with_times(
                    path, name_audio, start_time, end_time, audio_file, cut_times)
                new_audios.append(new_audio_file)
                audios_with_silence.append(
                    mixed_silence(path, name_audio, new_audio_file, cut_times))
                start_time = time_mil_1_final
                cut_times = cut_times + 1
            score_final = time_mil_2_final
    audios_with_silence.append(
        split_audios_with_times(path, name_audio, start_time, 30, audio_file,
                                cut_times + 1))
    mixed_audio(path, name_audio, audios_with_silence, audio_file)
    utils.delete_files(new_audios)
    utils.delete_files(audios_with_silence)
    utils.delete_file("result.txt")
def create_note(text, title=None):
    note = Types.Note()
    note.resources = []
    note.title = "Test note from EDAMTest.py" if title == None else title
    files = list_folder()
    # The content of an Evernote note is represented using Evernote Markup Language
    # (ENML). The full ENML specification can be found in the Evernote API Overview
    # at http://dev.evernote.com/documentation/cloud/chapters/ENML.php
    note.content = '<?xml version="1.0" encoding="UTF-8"?>'
    note.content += '<!DOCTYPE en-note SYSTEM ' \
                    '"http://xml.evernote.com/pub/enml2.dtd">'
    note.content += '<en-note>'
    note.content += f'{text}<br/>'
    if len(files) > 0:
        for file in files:
            print(file)
            note.content += add_img(note, file)
            print(note.content)
    note.content += '</en-note>'

    # Finally, send the new note to Evernote using the createNote method
    # The new Note object that is returned will contain server-generated
    # attributes such as the new note's unique GUID.
    created_note = note_store.createNote(note)
    delete_files()

    print("Successfully created a new note with GUID: ", created_note.guid)
Example #3
def reset_camera(video_camera, streamer):
    delete_files()
    if video_camera:
        video_camera.release()
    video_camera = VideoCamera()
    video_camera.initialize()
    streamer = CameraStream(camera_source=video_camera)
    return (video_camera, streamer,)
Example #4
def main():
    utils.delete_files()
    data = Data()
    if data.algorithm == Algorithm.GENETIC:
        algorithm = GeneticAlgorithm(data)
    else:
        algorithm = CSPAlgorithm(data)
    algorithm.run()
    print("Finished!")
Example #5
def run(seed, save_path, network, em, dataset, _rnd, debug):
    ex.commands['print_config']()

    # create storage directories if they don't exist yet
    utils.create_directory('networks')
    utils.create_directory('results')

    if debug:
        utils.create_directory('debug_output')
        utils.delete_files('debug_output')

    # seed numpy
    np.random.seed(seed)

    # load data
    data = get_training_data()

    # if debug set to true the trainer will report on the dae performance
    save_img_ind = np.random.randint(data['valid_data'].shape[1])
    if debug:
        utils.save_image('debug_output/valid_image.jpg', data['valid_data'][0, save_img_ind])

    # produce saving directory filenames
    folder_name = dataset['name'] + "_" + dataset['train_set'] + "_" + time.strftime(
        "%d_%m_%Y_%H:%M:%S") + "_" + str(seed)
    net_folder_path = os.path.join(save_path, folder_name)
    utils.create_directory(net_folder_path)

    model_config_path = os.path.join(net_folder_path, "model_config.pickle")
    em_dump_path = None

    if em['suffix'] is not None:
        em_dump_path = 'results/{}_{}_{}{}.pickle'.format(dataset['name'], em['nr_iters'], em['k'], em['suffix'])

    # save net structure
    with open(model_config_path, 'wb') as f:
        pickle.dump({'network': network, 'em': em, 'dataset': dataset}, f)

    # train dae
    train_dae(data=data, net_folder_path=net_folder_path, save_img_ind=save_img_ind)

    # load test data
    data = get_test_data()
    test_data = data['test_data']
    test_groups = data['test_groups'] if 'test_groups' in data.keys() else None

    # perform rc on best model
    return perform_reconstruction_clustering_from_file(net_folder_path, test_data, test_groups, _rnd, em_dump_path=em_dump_path)
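utils.create_directory appears in several of these training scripts; a one-line sketch, assuming it simply creates the directory when it does not already exist:

import os

def create_directory(path):
    # Create the directory (and any parents) if it is not already there.
    os.makedirs(path, exist_ok=True)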
Example #6
def upload_and_delete_files_wrapper(self, service, from_dir_path, files_upload, files_delete):
    '''
    Args:
        service: drive api service instance
        from_dir_path: directory path to upload files from
        files_upload: list of files to upload
        files_delete: list of files under from_dir_path, used to purge directory after upload completes
    '''
    utils.log('Upload ' + str(len(files_upload)) + ' file(s)')
    for f in files_upload:
        self.upload_file(service, ''.join([from_dir_path, f]), 'application/octet-stream', body={'title': f, 'description': 'zip file', 'mimeType': 'application/octet-stream'})
        files_delete.append(f)
        print '*',
    utils.log('Uploaded')
    utils.delete_files(from_dir_path, files_delete)
Example #7
def reduce_silence(audio_file):
	cut_times = 0
	start_time = 0
	audios_with_silence = []
	new_audios = []
	end_time = 0
	silence_time = 0
	score_final = 0
	detect_speech(audio_file)
	text_file = open("result.txt", "r")
	for text_line in text_file.readlines():
		text_line = text_line.rstrip()
		items = text_line.split(" ")
		time1 = items[1].split(":")
		time2 = items[2].split(":")
		time_mil_1 = time1[2].split(".")
		time_mil_2 = time2[2].split(".")
		time_mil_1_final = float(float(time_mil_1[1])/1000)+float(time_mil_1[0])
		time_mil_2_final = float(float(time_mil_2[1])/1000)+float(time_mil_2[0])
		if score_final != 0:
			silence_time = time_mil_1_final-score_final
		if silence_time > 1.0:
			end_time = score_final
			new_audio_file = split_audios(start_time,end_time,audio_file,cut_times)
			new_audios.append(new_audio_file)
			audios_with_silence.append(mixed_silence(new_audio_file,cut_times))
			start_time = time_mil_1_final
			cut_times = cut_times + 1
		score_final = time_mil_2_final
	audios_with_silence.append(split_audios(start_time,30,audio_file,cut_times+1))
	mixed_audio(audios_with_silence,audio_file)
	utils.delete_files(new_audios)
	utils.delete_files(audios_with_silence)
	utils.delete_file("result.txt")
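This older version of reduce_silence parses the result.txt timestamps inline; the first version delegates that to get_time_file. A sketch consistent with the parsing above, assuming timestamps look like 0:00:12.345 and that only the seconds component matters for these short clips:

def get_time_file(timestamp):
    # "0:00:12.345" -> 12.345 (hours and minutes ignored, as in the inline parsing above)
    seconds, millis = timestamp.split(":")[2].split(".")
    return float(seconds) + float(millis) / 1000.0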
Example #8
    def train(self, scratch, game, display):
        p = PLE(game,
                fps=30,
                frame_skip=1,
                num_steps=1,
                force_fps=True,
                display_screen=display)
        t1 = time.time()
        fname = None
        if not scratch:
            fname = self.load()
        else:
            delete_files(self.DATA_DIREC)
        f0, step, nb_save, nb_games = init_train(fname, self.DATA_DIREC)

        eps_tau = (self.NB_FRAMES - f0) // 8

        scores = []
        while step < self.NB_FRAMES:
            if len(scores) == self.SCORE_FREQ:
                print('States visited:', len(self.Q))
                print_scores(scores, self.SCORE_FREQ)
                scores = []
            p.reset_game()
            state = game.getGameState()
            state_tp = self.discretize(state)
            if state_tp not in self.Q:
                self.Q[state_tp] = [0, 0]

            act = 1
            episode = deque([], self.SIZE_FIFO)
            elig = {}
            gscore = 0
            nb_games += 1
            while not p.game_over():
                step += 1
                if step != 0 and (step % self.SAVE_FREQ) == 0:
                    self.save('Q_' + chr(97 + nb_save) + '_' + str(step) +
                              '_' + str(nb_games) + '.p')
                    nb_save += 1
                if step != 0 and (step % self.EPS_UPDATE_FREQ) == 0:
                    self.epsilon = update_epsilon(step, f0, self.EPS0, eps_tau,
                                                  self.NB_FRAMES)
                # 1) Observe r, s′
                bare_reward = p.act(ACTIONS[act])
                reward = self.reward_engineering(bare_reward)
                new_state = game.getGameState()
                new_state_tp = self.discretize(new_state)

                # 2) Choose a′ (GLIE actor) using Q
                if new_state_tp not in self.Q:
                    self.Q[new_state_tp] = [0, 0]
                qvals = self.get_qvals(new_state)
                new_act = self.greedy_action(qvals, self.epsilon)

                # 3) Temporal difference:  δ=r+γQ(s′,a′)−Q(s,a)
                delta = reward + self.GAMMA * self.Q[new_state_tp][
                    new_act] - self.Q[state_tp][act]

                # 4) Update Q
                episode.append((state_tp, act))
                elig[(state_tp, act)] = 1
                for (state_tp_ep, act_ep) in episode:
                    self.Q[state_tp_ep][act_ep] += (
                        self.ALPHA * delta * elig[(state_tp_ep, act_ep)])
                    elig[(state_tp_ep, act_ep)] *= self.LAMBDA

                # 5) s<-s', a<-a'
                state = new_state
                state_tp = new_state_tp
                act = new_act

                if bare_reward > 0:
                    gscore += 1

            scores.append(gscore)

        t2 = time.time()
        # Unicode code point of a: 97
        self.save('Q_' + chr(97 + nb_save) + '_' + str(step) + '_' +
                  str(nb_games) + '.p')
        print()
        print('Number of played games:', nb_games)
        print('Training completed in', (t2 - t1) / 60, 'minutes')
        print()
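The exploration schedule update_epsilon is not shown in this excerpt. One plausible sketch, assuming an exponential decay from EPS0 towards a small floor with time constant eps_tau (the real project may use a different schedule):

import numpy as np

def update_epsilon(step, f0, eps0, eps_tau, nb_frames, eps_min=0.01):
    # Decay epsilon exponentially with the number of frames seen since f0;
    # nb_frames is kept for signature compatibility but unused in this sketch.
    return max(eps_min, eps0 * np.exp(-(step - f0) / eps_tau))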
Example #9
    def train(self, scratch, game, display):
        p = PLE(game,
                fps=30,
                frame_skip=1,
                num_steps=1,
                force_fps=True,
                display_screen=display)
        fname = None
        if not scratch:
            fname = self.load()
        else:
            delete_files(self.DATA_DIREC)
        f0, step, nb_save, nb_games = init_train(fname, self.DATA_DIREC)

        eps_tau = (self.NB_FRAMES - f0) // self.EPS_RATE
        scores = []
        while step < self.NB_FRAMES:
            if len(scores) == self.SCORE_FREQ:
                print_scores(scores, self.SCORE_FREQ)
                scores = []

            p.reset_game()
            state = game.getGameState()
            state_arr = self.state_to_arr(state)
            # state_arr = self.scaler.transform(state_arr.reshape(1, -1))
            gscore = 0
            nb_games += 1
            while not p.game_over():
                step += 1
                if step != 0 and (step % self.SAVE_FREQ) == 0:
                    self.save(
                        chr(97 + nb_save) + '_' + str(step) + '_' +
                        str(nb_games))
                    nb_save += 1
                if step != 0 and (step % self.EPS_UPDATE_FREQ) == 0:
                    self.epsilon = update_epsilon(step, f0, self.EPS0, eps_tau,
                                                  self.NB_FRAMES)
                    print('WEIGHTS ABS MEAN')
                    print(abs(np.mean(self.model.get_weights()[0], axis=1)))

                # 1) In s, choose a (GLIE actor)
                qvals = self.get_qvals(state)
                act = self.greedy_action(qvals, self.epsilon)

                # 2) Observe r, s′
                bare_reward = p.act(ACTIONS[act])
                reward = self.reward_engineering(bare_reward)
                new_state = game.getGameState()
                new_state_arr = self.state_to_arr(new_state)

                self.replay_memory.append(
                    (state_arr, act, bare_reward, new_state_arr))
                if (len(self.replay_memory) == self.BUFFER_SIZE
                        and step % self.TRAIN_FREQ == 0):

                    X_train = []
                    y_train = []

                    # TEST: TRAIN ONLY WITH A SMALL BUFFER BATCH
                    replay_memory_copy = list(self.replay_memory)[:]
                    random.shuffle(replay_memory_copy)
                    for frame in replay_memory_copy[:self.BATCH_SIZE]:
                        s_arr_1, act_x, bare_reward_x, s_arr_2 = frame
                        reward_x = self.reward_engineering(bare_reward_x)
                        old_qval = self.model.predict(s_arr_1, batch_size=1)
                        qval_new = self.model.predict(s_arr_2, batch_size=1)
                        max_qval = np.max(qval_new)
                        # terminal state
                        if bare_reward_x < 0:
                            delta = reward_x
                        else:
                            delta = reward_x + self.GAMMA * max_qval
                        y = np.zeros((1, len(ACTIONS)))
                        y[0][:] = old_qval[0][:]
                        y[0][act_x] = old_qval[0][act_x] + self.ALPHA * delta
                        X_train.append(s_arr_1.reshape(len(STATES), ))
                        y_train.append(y.reshape(len(ACTIONS), ))

                    X_train = np.array(X_train)
                    y_train = np.array(y_train)
                    self.model.fit(X_train,
                                   y_train,
                                   batch_size=self.BATCH_SIZE,
                                   epochs=2,
                                   verbose=False)

                # 5) s <- s'
                state = new_state
                state_arr = new_state_arr

                if bare_reward > 0:
                    gscore += 1
            scores.append(gscore)

        self.save(chr(97 + nb_save) + '_' + str(step) + '_' + str(nb_games))
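greedy_action implements the GLIE actor referenced in the comments. A minimal epsilon-greedy sketch, assuming qvals is a 1-D array of action values:

import random

import numpy as np

def greedy_action(qvals, epsilon):
    # Explore with probability epsilon, otherwise pick the greedy action.
    if random.random() < epsilon:
        return random.randrange(len(qvals))
    return int(np.argmax(qvals))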
Example #10
    def train(self, scratch, game, display):
        p = PLE(game,
                fps=30,
                frame_skip=1,
                num_steps=1,
                force_fps=True,
                display_screen=display)
        fname = None
        if not scratch:
            fname = self.load()
        else:
            delete_files(self.DATA_DIREC)
        f0, step, nb_save, nb_games = init_train(fname, self.DATA_DIREC)

        eps_tau = (self.NB_FRAMES - f0) // self.EPS_RATE
        scores = []
        while step < self.NB_FRAMES:
            if len(scores) == self.SCORE_FREQ:
                print_scores(scores, self.SCORE_FREQ)
                scores = []

            p.reset_game()
            game.getGameState()
            screen = self.process_screen(p.getScreenRGB())
            last_screens_buff = deque([screen] * 4, maxlen=NB_LAST_SCREENS)
            last_screens = np.stack(last_screens_buff, axis=-1)

            # gscore = 0
            nb_games += 1
            score = 0
            while not p.game_over():
                step += 1
                if step != 0 and (step % self.SAVE_FREQ) == 0:
                    self.save(
                        chr(97 + nb_save) + '_' + str(step) + '_' +
                        str(nb_games))
                    nb_save += 1
                if step != 0 and (step % self.EPS_UPDATE_FREQ) == 0:
                    self.epsilon = update_epsilon(step, f0, self.EPS0, eps_tau,
                                                  self.NB_FRAMES)
                    # print('WEIGHTS ABS MEAN')
                    # print(abs(np.mean(self.model.get_weights()[0], axis=1)))

                # 1) In s, choose a (GLIE actor)
                qvals = self.get_qvals(last_screens)
                act = self.greedy_action(qvals, self.epsilon)

                # 2) Observe r, s′
                bare_reward = p.act(ACTIONS[act])
                if bare_reward > 0:
                    score += 1
                reward = self.reward_engineering(bare_reward)
                screen_new = self.process_screen(p.getScreenRGB())

                # update replay_memory
                self.replay_memory.append(screen, act, screen_new, reward)
                if len(self.replay_memory.buff) > self.MIN_REPLAY_MEMORY_SIZE:
                    # build minibatch
                    ls, actions, ls_new, r, terms = self.replay_memory.minibatch(
                    )
                    qvals_new = self.model_target.predict(ls_new)
                    qvals_new_max = qvals_new.max(1).reshape(
                        (self.BATCH_SIZE, 1))
                    delta = r + (1 - terms) * self.GAMMA * qvals_new_max
                    qvals = self.model.predict(ls)
                    qvals[np.arange(self.BATCH_SIZE),
                          actions.ravel()] = delta.ravel()
                    self.model.train_on_batch(x=ls, y=qvals)

                    if step % self.TARGET_FREQ == 0:
                        self.model.save(filepath=self.DATA_DIREC + 'target.h5')
                        self.model_target = load_model(
                            filepath=self.DATA_DIREC + 'target.h5')

                last_screens_buff.append(screen_new)
                last_screens = np.stack(last_screens_buff, axis=-1)
                screen = screen_new
            scores.append(score)
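The replay_memory object used above exposes append(...) plus a minibatch() that returns states, actions, next states, rewards and terminal flags. A simplified sketch of that interface, assuming a fixed-size buffer and that a negative reward marks a terminal transition (the project's real buffer presumably also rebuilds the 4-frame screen stacks, which is omitted here):

from collections import deque

import numpy as np

class ReplayMemory:
    def __init__(self, size, batch_size):
        self.buff = deque(maxlen=size)
        self.batch_size = batch_size

    def append(self, screen, action, screen_new, reward):
        self.buff.append((screen, action, screen_new, reward))

    def minibatch(self):
        # Sample a random batch of stored transitions.
        idx = np.random.choice(len(self.buff), self.batch_size, replace=False)
        screens, actions, screens_new, rewards = zip(*(self.buff[int(i)] for i in idx))
        ls = np.stack(screens)
        ls_new = np.stack(screens_new)
        actions = np.array(actions).reshape(-1, 1)
        r = np.array(rewards, dtype=np.float32).reshape(-1, 1)
        terms = (r < 0).astype(np.float32)  # assumption: negative reward == game over
        return ls, actions, ls_new, r, terms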
Example #11
        saved_id = self.id # delete() unsets the ID attribute, which we need in order to clear out the directory
        super(Work, self).delete()
        fname = "saved-work-%d.zip" % saved_id
        self.id = saved_id
        if hasattr(settings, "ATTIC"):
            try:
                target = os.path.join(settings.ATTIC, fname)
                log.info("Saving work to %s" % target)
                archive = utils.recreate_ingest_package(self)
                utils.movefile(archive,target)
            except OSError, ose:
                log.error("Error encountered saving deleted work (id:%s) to %s: %r" % ( saved_id, target, ose))
        else:
            log.info("Cleaning up %s" % (self.get_content_directory()))
        content_dir = self.get_content_directory()
        try:
            utils.delete_files(content_dir)
        except Exception, e:
            log.exception("unable to delete %s: %r" % ( content_dir, e ) )
    
            
# this was an example of an image upload field that would automatically save a thumbnail of 
# the uploaded image.  It works, but it's not needed here.

##class ThumbnailImageField(models.ImageField):
##    """
##    Subclass of standard ImageField that automatically thumbnails the incoming image.
##    """
##    def __init__(self,*args,**kwargs):
##        md = kwargs.get('max_dimension', 64)
##        if not isinstance(md, int):
##            md = 64
Example #12
def run(net_path, training, validation, nem, seed, log_dir,
        _run):  # #944371721 is fixed seed?
    # seed = 944371721  # or 1 (-0 -0 0.1 0.4)
    # tf.set_random_seed(seed)
    print('seed: ', seed)
    # clear debug dir
    if log_dir and net_path is None:
        utils.create_directory(log_dir)
        utils.delete_files(log_dir, recursive=True)

    # Set up data pipelines
    nr_iters = nem['nr_steps'] + 1
    train_inputs = InputPipeLine('training',
                                 shuffle=True,
                                 sequence_length=nr_iters,
                                 batch_size=training['batch_size'])
    valid_inputs = InputPipeLine('validation',
                                 shuffle=False,
                                 sequence_length=nr_iters,
                                 batch_size=validation['batch_size'])
    # Set weights
    with tf.variable_scope("context_and_klloss_weight"):
        context_weight = tf.get_variable(name='context_weight',
                                         dtype=tf.float32,
                                         trainable=False,
                                         initializer=0.0)
        klloss_weight = tf.get_variable(name='klloss_weight',
                                        dtype=tf.float32,
                                        trainable=False,
                                        initializer=0.0)
    # Build Graph
    train_op, train_op_v, train_graph, valid_graph, debug_graph = build_graphs(
        train_inputs.output, valid_inputs.output)
    init = tf.global_variables_initializer()

    # print vars
    utils.print_vars(tf.trainable_variables())
    utils.print_vars(tf.global_variables())

    with tf.Session() as session:

        # continue training from net_path if specified.
        saver = tf.train.Saver()
        if net_path is not None:
            saver.restore(session, net_path)
        else:
            session.run(init)

        # start training pipelines
        coord = tf.train.Coordinator()
        train_enqueue_thread = threading.Thread(target=train_inputs.enqueue,
                                                args=[session, coord])
        coord.register_thread(train_enqueue_thread)
        train_enqueue_thread.start()
        valid_enqueue_thread = threading.Thread(target=valid_inputs.enqueue,
                                                args=[session, coord])
        coord.register_thread(valid_enqueue_thread)
        valid_enqueue_thread.start()

        best_valid_loss = np.inf
        best_valid_epoch = 0
        for epoch in range(1, training['max_epoch'] + 1):

            t = time.time()
            if epoch < 20:  # 5 for em , 20 for rnn-em
                update_context_weight = tf.assign(context_weight, 0.)
                update_klloss_weight = tf.assign(klloss_weight, 0.)
                train_op_v = None
            else:
                update_context_weight = tf.assign(context_weight, 1.)
                update_klloss_weight = tf.assign(klloss_weight, 1.)
            session.run(update_context_weight)
            session.run(update_klloss_weight)
            #  ---------------
            print('klloss_weight before: ', session.run(klloss_weight))
            weight = tf.get_variable(
                name=
                'NEM/multi_rnn_cell/cell_0/EMCell_discri/input_discri/fully_connected_2/weights'
            )
            weight = tf.Print(weight, ['-----------------', weight])
            # session.run(weight)
            print(weight.eval()[:, 0])
            #  ---------------------
            train_loss, others, train_scores, train_ub_loss_last, train_variational_loss = run_epoch(
                session,
                train_inputs,
                train_graph,
                debug_graph,
                training['debug_samples'],
                "train_e{}".format(epoch),
                train_op=train_op,
                train_op_v=train_op_v)
            #  ---------------------
            print('klloss_weight after: ', session.run(klloss_weight))
            weight = tf.get_variable(
                name=
                'NEM/multi_rnn_cell/cell_0/EMCell_discri/input_discri/fully_connected_2/weights'
            )
            weight = tf.Print(weight, ['-----------------', weight])
            session.run(weight)
            print(weight.eval()[:, 0])
            #  -------------------
            #  ---------------------
            # weight = tf.get_variable(name='train/NEM/step_14/multi_rnn_cell/cell_0/EMCell_discri/input_discri/weighted_context')
            # weight = tf.Print(weight, ['-----------------', weight])
            # session.run(weight)
            # print(weight.eval()[:,0])
            #  -------------------
            add_log('training.loss', train_loss)
            add_log('training.others', others)
            add_log('training.score', train_scores[0])
            add_log('training.score_last', train_scores[1])
            add_log('training.ub_loss_last', train_ub_loss_last)

            create_curve_plots('train_loss', get_logs('training.loss'),
                               [0, 2000])

            print(
                "Epoch: %d Train Loss: %.3f, ARI: %.3f (conf: %0.3f), Last ARI: %.3f (conf: %.3f) took %.3fs"
                % (epoch, train_loss, train_scores[0], train_scores[2],
                   train_scores[1], train_scores[3], time.time() - t))
            print("    Other Train Losses:      ({})".format(", ".join(
                ["%.2f" % o for o in others.mean(0)])))
            print("    Train Loss UB last: %.2f" % train_ub_loss_last)
            # print("    Train Loss including variational loss--final loss: %.2f" % train_loss+train_variational_loss)
            print('    Train Variational Loss:', train_variational_loss)

            t = time.time()
            valid_loss, others, valid_scores, valid_ub_loss_last, valid_variational_loss = run_epoch(
                session, valid_inputs, valid_graph, debug_graph,
                validation['debug_samples'], "valid_e{}".format(epoch))

            # valid_scores = seq_ARI, last_ARI, seq_conf, last_conf
            add_log('validation.loss', valid_loss)
            add_log('validation.others', others)
            add_log('validation.score', valid_scores[0])
            add_log('validation.score_last', valid_scores[1])
            add_log('validation.ub_loss_last', valid_ub_loss_last)

            create_curve_plots('valid_loss', get_logs('validation.loss'),
                               [0, 2000])
            create_curve_plots('valid_score', get_logs('validation.score'),
                               [0, 1])
            create_curve_plots('valid_score_last',
                               get_logs('validation.score_last'), [0, 1])

            print(
                "    Validation Loss: %.3f, ARI: %.3f (conf: %0.3f), Last ARI: %.3f (conf: %.3f) took %.3fs"
                % (valid_loss, valid_scores[0], valid_scores[2],
                   valid_scores[1], valid_scores[3], time.time() - t))
            print("    Other Validation Losses: ({})".format(", ".join(
                ["%.2f" % o for o in others.mean(0)])))
            print("    Valid Loss UB last: %.2f" % valid_ub_loss_last)
            print("    Valid Variational Loss:", valid_variational_loss)

            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                best_valid_epoch = epoch
                _run.result = float(valid_scores[0]), float(
                    valid_scores[1]), float(valid_loss)
                print("    Best validation loss improved to %.03f" %
                      best_valid_loss)
                save_destination = saver.save(
                    session, os.path.abspath(os.path.join(log_dir, 'best')))
                print("    Saved to:", save_destination)
            if epoch in training['save_epochs']:
                save_destination = saver.save(
                    session,
                    os.path.abspath(
                        os.path.join(log_dir, 'epoch_{}'.format(epoch))))
                print("    Saved to:", save_destination)
            best_valid_loss = min(best_valid_loss, valid_loss)
            if best_valid_loss < np.min(
                    get_logs('validation.loss')[-training['max_patience']:]):
                print(
                    'Early Stopping because validation loss did not improve for {} epochs'
                    .format(training['max_patience']))
                break

            if np.isnan(valid_loss):
                print('Early Stopping because validation loss is nan')
                pass
                # break

        # shutdown everything to avoid zombies
        coord.request_stop()
        session.run(train_inputs.queue.close(cancel_pending_enqueues=True))
        session.run(valid_inputs.queue.close(cancel_pending_enqueues=True))
        coord.join()

    return float(get_logs('validation.score')[best_valid_epoch - 1]), float(
        get_logs('validation.score_last')[best_valid_epoch - 1]), float(
            get_logs('validation.loss')[best_valid_epoch - 1])
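Several of the Neural-EM style scripts on this page clear their debug directory with utils.delete_files(log_dir, recursive=True). A minimal sketch of that variant, assuming it removes every file under the directory while keeping the directory tree itself:

import os

def delete_files(directory, recursive=False):
    if recursive:
        for root, _, filenames in os.walk(directory):
            for filename in filenames:
                os.remove(os.path.join(root, filename))
    else:
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            if os.path.isfile(path):
                os.remove(path)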
Example #13
def clean_up_intermediate_files():
    delete_files(TMP_DIR + '*.sh')
    delete_files(TMP_DIR + '*.o')
    delete_files(TMP_DIR + '*.c')
Example #14
def main():
    utils.delete_files()
    data = Data()
    algorithm = GeneticAlgorithm(data)
    algorithm.run()
    print("Finished!")
Example #15
def run(record_grouping_score, record_relational_loss, feed_actions, net_path,
        training, validation, nem, dt, seed, log_dir, _run):
    save_epochs = training['save_epochs']

    # clear debug dir
    if log_dir and net_path is None:
        utils.create_directory(log_dir)
        utils.delete_files(log_dir, recursive=True)

    # prep weights for print out
    loss_step_weights = get_loss_step_weights()
    s_loss_weights = np.sum(loss_step_weights)
    dt_s_loss_weights = np.sum(loss_step_weights[-dt:])

    # Set up data pipelines
    nr_iters = nem['nr_steps'] + 1
    out_list = ['features']
    if record_grouping_score:
        out_list.append('groups')
    if record_relational_loss:
        out_list.append(record_relational_loss)
    if feed_actions:
        out_list.append('actions')

    train_inputs = InputPipeLine('training',
                                 shuffle=True,
                                 out_list=out_list,
                                 sequence_length=nr_iters,
                                 batch_size=training['batch_size'])
    valid_inputs = InputPipeLine('validation',
                                 shuffle=False,
                                 out_list=out_list,
                                 sequence_length=nr_iters,
                                 batch_size=validation['batch_size'])

    # Build Graph
    train_op, train_graph, valid_graph, debug_graph = build_graphs(
        train_inputs.output, valid_inputs.output)
    init = tf.global_variables_initializer()

    # print vars
    utils.print_vars(tf.trainable_variables())

    with tf.Session() as session:
        tf.set_random_seed(seed)

        # continue training from net_path if specified
        saver = tf.train.Saver()
        if net_path is not None:
            saver.restore(session, net_path)
        else:
            session.run(init)

        # start training pipelines
        writer = tf.summary.FileWriter(
            log_dir,
            graph=session.graph,
        )
        coord = tf.train.Coordinator()
        train_enqueue_thread = threading.Thread(target=train_inputs.enqueue,
                                                args=[session, coord])
        coord.register_thread(train_enqueue_thread)
        train_enqueue_thread.start()
        valid_enqueue_thread = threading.Thread(target=valid_inputs.enqueue,
                                                args=[session, coord])
        coord.register_thread(valid_enqueue_thread)
        valid_enqueue_thread.start()

        best_valid_loss = np.inf
        best_valid_epoch = 0
        for epoch in range(1, training['max_epoch'] + 1):
            # run train epoch
            t = time.time()
            log_dict = run_epoch(session,
                                 train_inputs,
                                 train_graph,
                                 debug_graph,
                                 training['debug_samples'],
                                 "train_e{}".format(epoch),
                                 train_op=train_op)

            # log all items in dict
            log_log_dict('training', log_dict)

            # produce print-out
            print("\n" + 80 * "%" + "    EPOCH {}   ".format(epoch) + 80 * "%")
            print_log_dict(log_dict, 'Train', t, dt, s_loss_weights,
                           dt_s_loss_weights)

            # run valid epoch
            t = time.time()
            log_dict = run_epoch(session, valid_inputs, valid_graph,
                                 debug_graph, validation['debug_samples'],
                                 "valid_e{}".format(epoch))

            # add logs
            log_log_dict('validation', log_dict)

            # produce plots
            create_curve_plots(
                'loss', {
                    'training': get_logs('training.loss'),
                    'validation': get_logs('validation.loss')
                }, [0, 1000], [0, 200])
            create_curve_plots(
                'r_loss', {
                    'training': get_logs('training.r_loss'),
                    'validation': get_logs('validation.r_loss')
                }, [0, 100], [0, 20])

            create_curve_plots(
                'score', {
                    'score': get_logs('validation.score'),
                    'score_last': get_logs('validation.score_last')
                }, [0, 1], None)

            # produce print-out
            print("\n")
            print_log_dict(log_dict, 'Validation', t, dt, s_loss_weights,
                           dt_s_loss_weights)

            if log_dict['loss'] < best_valid_loss:
                best_valid_loss = log_dict['loss']
                best_valid_epoch = epoch
                _run.result = float(log_dict['score']), float(log_dict['score_last']), \
                              float(log_dict['loss']), float(log_dict['ub_loss']), \
                              float(np.sum(log_dict['others'][-dt:, 1])/dt_s_loss_weights), \
                              float(np.sum(log_dict['others_ub'][-dt:, 1]) / dt_s_loss_weights), \
                              float(np.sum(log_dict['others'][-dt:, 2]) / dt_s_loss_weights), \
                              float(np.sum(log_dict['others_ub'][-dt:, 2]) / dt_s_loss_weights), \
                              float(log_dict['r_loss']), float(log_dict['r_ub_loss']), \
                              float(np.sum(log_dict['r_others'][-dt:, 1]) / dt_s_loss_weights), \
                              float(np.sum(log_dict['r_others_ub'][-dt:, 1]) / dt_s_loss_weights), \
                              float(np.sum(log_dict['r_others'][-dt:, 2]) / dt_s_loss_weights), \
                              float(np.sum(log_dict['r_others_ub'][-dt:, 2]) / dt_s_loss_weights)

                print("    Best validation loss improved to %.03f" %
                      best_valid_loss)
                save_destination = saver.save(
                    session, os.path.abspath(os.path.join(log_dir, 'best')))
                print("    Saved to:", save_destination)
            if epoch in save_epochs:
                save_destination = saver.save(
                    session,
                    os.path.abspath(
                        os.path.join(log_dir, 'epoch_{}'.format(epoch))))
                print("    Saved to:", save_destination)

            best_valid_loss = min(best_valid_loss, log_dict['loss'])

            if best_valid_loss < np.min(
                    get_logs('validation.loss')[-training['max_patience']:]):
                print(
                    'Early Stopping because validation loss did not improve for {} epochs'
                    .format(training['max_patience']))
                break

            if np.isnan(log_dict['loss']):
                print('Early Stopping because validation loss is nan')
                break

        # shutdown everything to avoid zombies
        coord.request_stop()
        session.run(train_inputs.queue.close(cancel_pending_enqueues=True))
        session.run(valid_inputs.queue.close(cancel_pending_enqueues=True))
        coord.join()

    # reset the graph
    tf.reset_default_graph()

    # gather best results
    best_valid_score = float(
        get_logs('validation.score')[best_valid_epoch - 1])
    best_valid_score_last = float(
        get_logs('validation.score_last')[best_valid_epoch - 1])

    best_valid_loss = float(get_logs('validation.loss')[best_valid_epoch - 1])
    best_valid_ub_loss = float(
        get_logs('validation.ub_loss')[best_valid_epoch - 1])

    best_valid_intra_loss = float(
        np.sum(get_logs('validation.others')[best_valid_epoch - 1][-dt:, 1]) /
        dt_s_loss_weights)
    best_valid_intra_ub_loss = float(
        np.sum(
            get_logs('validation.others_ub')[best_valid_epoch - 1][-dt:, 1]) /
        dt_s_loss_weights)

    best_valid_inter_loss = float(
        np.sum(get_logs('validation.others')[best_valid_epoch - 1][-dt:, 2]) /
        dt_s_loss_weights)
    best_valid_inter_ub_loss = float(
        np.sum(
            get_logs('validation.others_ub')[best_valid_epoch - 1][-dt:, 2]) /
        dt_s_loss_weights)

    best_valid_r_loss = float(
        get_logs('validation.r_loss')[best_valid_epoch - 1])
    best_valid_r_ub_loss = float(
        get_logs('validation.r_ub_loss')[best_valid_epoch - 1])

    best_valid_r_intra_loss = float(
        np.sum(get_logs('validation.r_others')[best_valid_epoch - 1][-dt:, 1])
        / dt_s_loss_weights)
    best_valid_r_intra_ub_loss = float(
        np.sum(
            get_logs('validation.r_others_ub')[best_valid_epoch - 1][-dt:, 1])
        / dt_s_loss_weights)

    best_valid_r_inter_loss = float(
        np.sum(get_logs('validation.r_others')[best_valid_epoch - 1][-dt:, 2])
        / dt_s_loss_weights)
    best_valid_r_inter_ub_loss = float(
        np.sum(
            get_logs('validation.r_others_ub')[best_valid_epoch - 1][-dt:, 2])
        / dt_s_loss_weights)

    return best_valid_score, best_valid_score_last, best_valid_loss, best_valid_ub_loss, best_valid_intra_loss, \
           best_valid_intra_ub_loss, best_valid_inter_loss, best_valid_inter_ub_loss, best_valid_r_loss, \
           best_valid_r_ub_loss, best_valid_r_intra_loss, best_valid_r_intra_ub_loss, best_valid_r_inter_loss, \
           best_valid_r_inter_ub_loss
Example #16
import sys

import discord
from discord.ext import commands

# Local
import constants
from debug import DEBUG_COMMAND_LIST
import dirs
import envvars
import gamedata
import utils
from localization import LOCALIZATION_DATA


# Minimum Python version check
if sys.version_info < (3, 6):
    sys.exit("The White Rabbit does not support Python versions below 3.6. Please install a newer version")

# Clear .pkl files on startup to avoid export bug
utils.delete_files(dirs.FONT_DIR, "pkl")

# Enable Server Members gateway intent to find all users
intents = discord.Intents.all()

bot = commands.Bot(command_prefix=commands.when_mentioned_or(constants.COMMAND_PREFIX), intents=intents)
bot.games = {}

# Localization
BOT_CHANNEL = LOCALIZATION_DATA["channels"]["bot-channel"]
SPECTATOR_ROLE = LOCALIZATION_DATA["spectator-role"]


@bot.event
async def on_ready():
    # Set custom status
Example #17
File: nem.py  Project: zsh1993/Neural-EM
def run(net_path, training, validation, nem, seed, log_dir, _run):

    # clear debug dir
    if log_dir and net_path is None:
        utils.create_directory(log_dir)
        utils.delete_files(log_dir, recursive=True)

    # Set up data pipelines
    nr_iters = nem['nr_steps'] + 1
    train_inputs = InputPipeLine('training',
                                 shuffle=True,
                                 sequence_length=nr_iters,
                                 batch_size=training['batch_size'])
    valid_inputs = InputPipeLine('validation',
                                 shuffle=False,
                                 sequence_length=nr_iters,
                                 batch_size=validation['batch_size'])

    # Build Graph
    train_op, train_graph, valid_graph, debug_graph = build_graphs(
        train_inputs.output, valid_inputs.output)
    init = tf.global_variables_initializer()

    # print vars
    utils.print_vars(tf.trainable_variables())

    with tf.Session() as session:
        tf.set_random_seed(seed)

        # continue training from net_path if specified.
        saver = tf.train.Saver()
        if net_path is not None:
            saver.restore(session, net_path)
        else:
            session.run(init)

        # start training pipelines
        coord = tf.train.Coordinator()
        train_enqueue_thread = threading.Thread(target=train_inputs.enqueue,
                                                args=[session, coord])
        coord.register_thread(train_enqueue_thread)
        train_enqueue_thread.start()
        valid_enqueue_thread = threading.Thread(target=valid_inputs.enqueue,
                                                args=[session, coord])
        coord.register_thread(valid_enqueue_thread)
        valid_enqueue_thread.start()

        best_valid_loss = np.inf
        best_valid_epoch = 0
        for epoch in range(1, training['max_epoch'] + 1):

            t = time.time()
            train_loss, others, train_scores, train_ub_loss_last = run_epoch(
                session,
                train_inputs,
                train_graph,
                debug_graph,
                training['debug_samples'],
                "train_e{}".format(epoch),
                train_op=train_op)

            add_log('training.loss', train_loss)
            add_log('training.others', others)
            add_log('training.score', train_scores[0])
            add_log('training.score_last', train_scores[1])
            add_log('training.ub_loss_last', train_ub_loss_last)

            create_curve_plots('train_loss', get_logs('training.loss'),
                               [0, 2000])

            print(
                "Epoch: %d Train Loss: %.3f, ARI: %.3f (conf: %0.3f), Last ARI: %.3f (conf: %.3f) took %.3fs"
                % (epoch, train_loss, train_scores[0], train_scores[2],
                   train_scores[1], train_scores[3], time.time() - t))
            print("    Other Train Losses:      ({})".format(", ".join(
                ["%.2f" % o for o in others.mean(0)])))
            print("    Train Loss UB last: %.2f" % train_ub_loss_last)

            t = time.time()
            valid_loss, others, valid_scores, valid_ub_loss_last = run_epoch(
                session, valid_inputs, valid_graph, debug_graph,
                validation['debug_samples'], "valid_e{}".format(epoch))

            # valid_scores = seq_ARI, last_ARI, seq_conf, last_conf
            add_log('validation.loss', valid_loss)
            add_log('validation.others', others)
            add_log('validation.score', valid_scores[0])
            add_log('validation.score_last', valid_scores[1])
            add_log('validation.ub_loss_last', valid_ub_loss_last)

            create_curve_plots('valid_loss', get_logs('validation.loss'),
                               [0, 2000])
            create_curve_plots('valid_score', get_logs('validation.score'),
                               [0, 1])
            create_curve_plots('valid_score_last',
                               get_logs('validation.score_last'), [0, 1])

            print(
                "    Validation Loss: %.3f, ARI: %.3f (conf: %0.3f), Last ARI: %.3f (conf: %.3f) took %.3fs"
                % (valid_loss, valid_scores[0], valid_scores[2],
                   valid_scores[1], valid_scores[3], time.time() - t))
            print("    Other Validation Losses: ({})".format(", ".join(
                ["%.2f" % o for o in others.mean(0)])))
            print("    Valid Loss UB last: %.2f" % valid_ub_loss_last)

            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                best_valid_epoch = epoch
                _run.result = float(valid_scores[0]), float(
                    valid_scores[1]), float(valid_loss)
                print("    Best validation loss improved to %.03f" %
                      best_valid_loss)
                save_destination = saver.save(
                    session, os.path.abspath(os.path.join(log_dir, 'best')))
                print("    Saved to:", save_destination)
            if epoch in training['save_epochs']:
                save_destination = saver.save(
                    session,
                    os.path.abspath(
                        os.path.join(log_dir, 'epoch_{}'.format(epoch))))
                print("    Saved to:", save_destination)
            best_valid_loss = min(best_valid_loss, valid_loss)
            if best_valid_loss < np.min(
                    get_logs('validation.loss')[-training['max_patience']:]):
                print(
                    'Early Stopping because validation loss did not improve for {} epochs'
                    .format(training['max_patience']))
                break

            if np.isnan(valid_loss):
                print('Early Stopping because validation loss is nan')
                break

        # shutdown everything to avoid zombies
        coord.request_stop()
        session.run(train_inputs.queue.close(cancel_pending_enqueues=True))
        session.run(valid_inputs.queue.close(cancel_pending_enqueues=True))
        coord.join()

    return float(get_logs('validation.score')[best_valid_epoch - 1]), float(
        get_logs('validation.score_last')[best_valid_epoch - 1]), float(
            get_logs('validation.loss')[best_valid_epoch - 1])
Example #18
def run(record_grouping_score, record_relational_loss, feed_actions, net_path,
        training, validation, nem, dt, seed, log_dir, _run):

    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    save_epochs = training['save_epochs']

    # clear debug dir
    if log_dir and net_path is None:
        utils.create_directory(log_dir)
        utils.delete_files(log_dir, recursive=True)

    # prep weights for print out
    loss_step_weights = get_loss_step_weights()
    s_loss_weights = np.sum(loss_step_weights)
    dt_s_loss_weights = np.sum(loss_step_weights[-dt:])

    # Set up data pipelines
    nr_iters = nem['nr_steps'] + 1
    out_list = ['features']
    if record_grouping_score:
        out_list.append('groups')
    if record_relational_loss:
        out_list.append(record_relational_loss)
    if feed_actions:
        out_list.append('actions')

    train_dataset = InputDataset("training",
                                 training['batch_size'],
                                 out_list,
                                 sequence_length=nem['nr_steps'] + 1)
    valid_dataset = InputDataset("validation",
                                 validation['batch_size'],
                                 out_list,
                                 sequence_length=nem['nr_steps'] + 1)
    train_data_loader = DataLoader(dataset=train_dataset,
                                   batch_size=1,
                                   shuffle=False,
                                   num_workers=training['num_workers'],
                                   collate_fn=collate)
    valid_data_loader = DataLoader(dataset=valid_dataset,
                                   batch_size=1,
                                   shuffle=False,
                                   num_workers=training['num_workers'],
                                   collate_fn=collate)

    # Get dimensions
    input_shape = train_dataset._data_in_file['features'].shape
    W, H, C = list(input_shape)[-3:]

    inner_cell = R_NEM(nem['k'])
    nem_cell = NEMCell(inner_cell,
                       input_shape=(W, H, C),
                       distribution=nem['pixel_dist'])
    optimizer = set_up_optimizer(
        list(nem_cell.parameters()) + list(inner_cell.parameters()))

    best_valid_loss = np.inf
    best_valid_epoch = 0
    for epoch in range(1, training['max_epoch'] + 1):
        # run train epoch
        t = time.time()
        log_dict = run_epoch(nem_cell,
                             optimizer,
                             train_data_loader,
                             train=True)

        # log all items in dict
        log_log_dict('training', log_dict)

        # produce print-out
        print("\n" + 80 * "%" + "    EPOCH {}   ".format(epoch) + 80 * "%")
        print_log_dict(log_dict, 'Train', t, dt, s_loss_weights,
                       dt_s_loss_weights)

        # run valid epoch
        t = time.time()
        log_dict = run_val_epoch(nem_cell, optimizer, valid_data_loader)

        # add logs
        log_log_dict('validation', log_dict)

        # produce plots
        create_curve_plots(
            'loss', {
                'training': get_logs('training.loss'),
                'validation': get_logs('validation.loss')
            }, [0, 1000], [0, 200])
        create_curve_plots(
            'r_loss', {
                'training': get_logs('training.r_loss'),
                'validation': get_logs('validation.r_loss')
            }, [0, 100], [0, 20])

        create_curve_plots(
            'score', {
                'training': get_logs('training.score'),
                'validation': get_logs('validation.score')
            }, [0, 1], None)

        # produce print-out
        print("\n")
        print_log_dict(log_dict, 'Validation', t, dt, s_loss_weights,
                       dt_s_loss_weights)

        if log_dict['loss'] < best_valid_loss:
            best_valid_loss = log_dict['loss']
            best_valid_epoch = epoch
            _run.result = float(log_dict['score']), \
                          float(log_dict['loss']), float(log_dict['ub_loss']),  float(log_dict['r_loss']), float(log_dict['r_ub_loss'])

            #float(np.sum(log_dict['others'][-dt:, 1])/dt_s_loss_weights), \
            #float(np.sum(log_dict['others_ub'][-dt:, 1]) / dt_s_loss_weights), \
            #float(np.sum(log_dict['others'][-dt:, 2]) / dt_s_loss_weights), \
            #float(np.sum(log_dict['others_ub'][-dt:, 2]) / dt_s_loss_weights), \
            #float(np.sum(log_dict['r_others'][-dt:, 1]) / dt_s_loss_weights), \
            #float(np.sum(log_dict['r_others_ub'][-dt:, 1]) / dt_s_loss_weights), \
            #float(np.sum(log_dict['r_others'][-dt:, 2]) / dt_s_loss_weights), \
            #float(np.sum(log_dict['r_others_ub'][-dt:, 2]) / dt_s_loss_weights)

            print("    Best validation loss improved to %.03f" %
                  best_valid_loss)
            torch.save(nem_cell.state_dict(),
                       os.path.abspath(os.path.join(log_dir, 'best')))
            print("    Saved to:",
                  os.path.abspath(os.path.join(log_dir, 'best')))
        if epoch in save_epochs:
            torch.save(
                nem_cell.state_dict(),
                os.path.abspath(os.path.join(log_dir,
                                             'epoch_{}'.format(epoch))))
            print(
                "    Saved to:",
                os.path.abspath(os.path.join(log_dir,
                                             'epoch_{}'.format(epoch))))

        best_valid_loss = min(best_valid_loss, log_dict['loss'])

        if best_valid_loss < np.min(
                get_logs('validation.loss')[-training['max_patience']:]):
            print(
                'Early Stopping because validation loss did not improve for {} epochs'
                .format(training['max_patience']))
            break

        if np.isnan(log_dict['loss']):
            print('Early Stopping because validation loss is nan')
            break

    # gather best results
    best_valid_score = float(
        get_logs('validation.score')[best_valid_epoch - 1])
    #best_valid_score_last = float(get_logs('validation.score_last')[best_valid_epoch - 1])

    best_valid_loss = float(get_logs('validation.loss')[best_valid_epoch - 1])
    best_valid_ub_loss = float(
        get_logs('validation.ub_loss')[best_valid_epoch - 1])

    #best_valid_intra_loss = float(np.sum(get_logs('validation.others')[best_valid_epoch - 1][-dt:, 1])/dt_s_loss_weights)
    #best_valid_intra_ub_loss = float(np.sum(get_logs('validation.others_ub')[best_valid_epoch - 1][-dt:, 1])/dt_s_loss_weights)

    #best_valid_inter_loss = float(np.sum(get_logs('validation.others')[best_valid_epoch - 1][-dt:, 2])/dt_s_loss_weights)
    #best_valid_inter_ub_loss = float(np.sum(get_logs('validation.others_ub')[best_valid_epoch - 1][-dt:, 2])/dt_s_loss_weights)

    best_valid_r_loss = float(
        get_logs('validation.r_loss')[best_valid_epoch - 1])
    best_valid_r_ub_loss = float(
        get_logs('validation.r_ub_loss')[best_valid_epoch - 1])

    #best_valid_r_intra_loss = float(np.sum(get_logs('validation.r_others')[best_valid_epoch - 1][-dt:, 1])/dt_s_loss_weights)
    #best_valid_r_intra_ub_loss = float(np.sum(get_logs('validation.r_others_ub')[best_valid_epoch - 1][-dt:, 1])/dt_s_loss_weights)

    #best_valid_r_inter_loss = float(np.sum(get_logs('validation.r_others')[best_valid_epoch - 1][-dt:, 2])/dt_s_loss_weights)
    #best_valid_r_inter_ub_loss = float(np.sum(get_logs('validation.r_others_ub')[best_valid_epoch - 1][-dt:, 2])/dt_s_loss_weights)

    return best_valid_score, best_valid_loss, best_valid_ub_loss, \
           best_valid_r_loss, best_valid_r_ub_loss