Example #1
def toggle_active():
    if not config.system_active:
        startup()
        config.process = subprocess.Popen('./sensor.py')
        config.system_active = True
    else:
        reset()
        config.process.kill()
        config.system_active = False
        config.alerts_triggered = 0

    return redirect(url_for('index'))
Example #2
    def reset(self, path):
        utils.reset(path)
        utils.set_stack(self.quantity)
        utils.set_timer(self.timer)

        self.infos = dict()
        start_time = utils.get_start_time()
        assert start_time != -1
        self.infos["start_time"] = start_time

        for _, agent in self.agents.items():
            agent.update_eps()
Example #3
    def updateParkspot(self, total_places_count, occupied_places_count):
        print("Update parkspot data")
        parkspotdata = swagger_client.UpdateParkspotStatus()
        parkspotdata.local_ip = getLocalIP()
        parkspotdata.total_places_count = total_places_count
        parkspotdata.occupied_places_count = occupied_places_count
        try:
            api_response = self.api_instance.update_parkspotnodes_status(
                self.info["token"], parkspotdata, self.info["parkspot_id"])
            if api_response.error.status == 404:
                print("Could not update parkspotdata. Resetting")
                reset()

            return True
        except ApiException as e:
            return False
Example #4
def main():
    info = {"parkspot_id": "", "token": ""}

    # Load Parkspot ID and Token
    infoPath = Path(settings.infoFile)
    if infoPath.is_file():
        info = (pickle.load(open(settings.infoFile, "rb")))

    tokenAvailable = True
    if (info['token'] == ""):
        tokenAvailable = False

    networkAvailable = hasNetwork()

    if networkAvailable is False or tokenAvailable is False:
        setupParkspot(info)

    api = ParkspotApi(info)

    # Create a new parkspot if we did not create one yet
    if info["parkspot_id"] == "":
        if api.createParkspot() is False:
            print("Could not create Parkspot. Resetting")
            reset()
        pickle.dump(info, open(settings.infoFile, "w+b"))

    # Tell the backend our IP
    if api.updateLocalIp() is False:
        print("Could update IP. Resetting")
        shutdown()

    # Initialize the camera and the model
    print("Starting model")
    cam = Camera(usePiCamera=False, resolution=(1280, 720))
    model = CNN(cam, 'cnn/model/complexCNN.h5', api)

    # Create the parkspot grpc service
    service = ParkspotService(cam, model, api)

    # Run model
    while True:
        model.detect()
        time.sleep(5)

    # When everything is done, release everything
    cv2.destroyAllWindows()
    service.stop()
Example #5
def check():

    while True:
        if config.debug:
            logging.debug('checking metrics...')
        for plugin in plugins:
            for target in plugin.targets:
                for metric in target.metrics:
                    if metric.value is None:
                        update_metric(metric)
                    value = metric.value
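                    # a reading back inside [target.min, target.max] clears the metric;
                    # out-of-range readings accumulate retries until an alert is queued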
                    if target.min <= value <= target.max:
                        reset(metric)
                    else:
                        if metric.retry < target.retry:
                            metric.retry += 1
                    if metric.retry == 3:
                        messages.append((target, deepcopy(metric)))
#                        metric.retry = 0 # re-schedule
        time.sleep(10) # check interval
Example #6
    def onSettingsChanged(self):
        # Monitor emby settings
        currentPath = utils.settings('useDirectPaths')
        if utils.window('emby_pluginpath') != currentPath:
            # Plugin path value changed. Offer to reset
            self.logMsg("Changed to playback mode detected", 1)
            utils.window('emby_pluginpath', value=currentPath)
            resp = xbmcgui.Dialog().yesno(
                                heading="Playback mode change detected",
                                line1=(
                                    "Detected the playback mode has changed. The database "
                                    "needs to be recreated for the change to be applied. "
                                    "Proceed?"))
            if resp:
                utils.reset()

        currentLog = utils.settings('logLevel')
        if utils.window('emby_logLevel') != currentLog:
            # The log level changed, set new prop
            self.logMsg("New log level: %s" % currentLog, 1)
            utils.window('emby_logLevel', value=currentLog)
Example #7
def main():
    ################ load ###################
    #train_agent
    actor_train_path = os.path.abspath(
        os.curdir) + '/Generate_Traffic_Flow_MAS_RL/weight/AC_TD3_actor.pkl'
    critic_train_path = os.path.abspath(
        os.curdir) + '/Generate_Traffic_Flow_MAS_RL/weight/AC_TD3_critic.pkl'
    if os.path.exists(actor_train_path):
        actor_train = Actor(state_size, action_size).to(device)
        actor_train.load_state_dict(torch.load(actor_train_path))
        print('Actor_Train Model loaded')
    else:
        actor_train = Actor(state_size, action_size).to(device)
    if os.path.exists(critic_train_path):
        critic_train = Critic(state_size, action_size).to(device)
        critic_train.load_state_dict(torch.load(critic_train_path))
        print('Critic_Train Model loaded')
    else:
        critic_train = Critic(state_size, action_size).to(device)
    critic_next_train = Critic(state_size, action_size).to(device)
    critic_next_train.load_state_dict(critic_train.state_dict())
    #agents
    actor_path = os.path.abspath(
        os.curdir) + '/Generate_Traffic_Flow_MAS_RL/weight/AC_TD_MAS_actor.pkl'
    if os.path.exists(actor_path):
        actor = Actor(state_size, action_size).to(device)
        actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')

    print("Waiting for GAMA...")

    ################### initialization ########################
    reset()

    episode = 0

    training_stage = 65

    lr = 0.0001

    sample_lr = [
        0.0001, 0.00009, 0.00008, 0.00007, 0.00006, 0.00005, 0.00004, 0.00003,
        0.00002, 0.00001, 0.000009, 0.000008, 0.000007, 0.000006, 0.000005,
        0.000004, 0.000003, 0.000002, 0.000001
    ]
    if episode > training_stage:  #50 100
        try:
            lr = sample_lr[int(episode // training_stage)] * 0.01
        except (IndexError):
            lr = 0.000001 * 0.9  #* (0.9 ** ((episode-1000) // 60))

    optimizerA = optim.Adam(actor_train.parameters(), lr, betas=(0.95, 0.999))
    optimizerC = optim.Adam(critic_train.parameters(), lr, betas=(0.95, 0.999))

    values = []
    rewards = []
    masks = []
    total_loss = []
    total_rewards = []
    loss = []
    average_speed = []

    value = 0
    gama = 0.9
    over = 0
    log_prob = 0
    memory = Memory()

    A_T, state, reward, done, time_pass, over, average_speed_NPC = GAMA_connect(
    )
    print("Connected")
    ##################  start  #########################
    while over != 1:
        #training_agent
        if A_T == 0:
            # normal case
            average_speed.append(state[0])
            if (done == 0 and time_pass != 0):
                # reward from the previous step
                reward = torch.tensor([reward],
                                      dtype=torch.float,
                                      device=device)
                rewards.append(reward)
                state = torch.DoubleTensor(state).reshape(
                    1, state_size).to(device)
                state_img = generate_img_train()
                tensor_cv = torch.from_numpy(np.transpose(
                    state_img, (2, 0, 1))).double().to(device) / 255
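                # maintain a sliding window of the last three states / image frames;
                # these stacks feed critic_next_train as the "next state"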
                if len(memory.states_next) == 0:
                    #for _ in range(3):
                    memory.states_next = memory.states
                    memory.states_next[2] = state
                    memory.states_img_next = memory.states_img
                    memory.states_img_next[2] = tensor_cv
                else:
                    del memory.states_next[:1]
                    del memory.states_img_next[:1]
                    memory.states_next.append(state)
                    memory.states_img_next.append(tensor_cv)

                state_next = torch.stack(
                    memory.states_next).to(device).detach()
                tensor_cv_next = torch.stack(
                    memory.states_img_next).to(device).detach()
                value_next, _, _, _ = critic_next_train(
                    state_next, tensor_cv_next, h_state_cv_c, h_state_n_c,
                    h_state_3_c)  #_next
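                # critic_next_train is a lagged copy of critic_train (re-synced after
                # each update below) used only to estimate V(s+1) for the TD target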
                with torch.autograd.set_detect_anomaly(True):
                    # TD:r(s) +  gama*v(s+1) - v(s)
                    advantage = reward.detach(
                    ) + gama * value_next.detach() - value
                    actor_loss = -(log_prob * advantage.detach())
                    critic_loss = (reward.detach() +
                                   gama * value_next.detach() - value).pow(2)
                    optimizerA.zero_grad()
                    optimizerC.zero_grad()
                    critic_loss.backward()
                    actor_loss.backward()
                    loss.append(critic_loss)
                    optimizerA.step()
                    optimizerC.step()
                    critic_next_train.load_state_dict(
                        critic_train.state_dict())

                del memory.states[:1]
                del memory.states_img[:1]
                memory.states.append(state)
                memory.states_img.append(tensor_cv)
                state = torch.stack(memory.states).to(device).detach()
                tensor_cv = torch.stack(memory.states_img).to(device).detach()
                value, h_state_cv_c, h_state_n_c, h_state_3_c = critic_train(
                    state, tensor_cv, h_state_cv_c, h_state_n_c, h_state_3_c)
                action, log_prob = actor_train(state, tensor_cv)
                log_prob = log_prob.unsqueeze(0)

                send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])  # row
                masks.append(
                    torch.tensor([1 - done], dtype=torch.float, device=device))
                values.append(value)

            # episode finished
            elif done == 1:
                average_speed.append(state[0])
                send_to_GAMA([[1, 0]])
                # send the action first, then compute the update
                print(state)
                rewards.append(reward)  #contains the last
                reward = torch.tensor([reward],
                                      dtype=torch.float,
                                      device=device)
                rewards.append(reward)  #contains the last
                total_reward = sum(rewards).cpu().detach().numpy()
                total_rewards.append(total_reward)

                with torch.autograd.set_detect_anomaly(True):
                    advantage = reward.detach(
                    ) - value  #+ last_value   V(s+1) = 0 at the final step
                    actor_loss = -(log_prob * advantage.detach())
                    critic_loss = (reward.detach() - value).pow(
                        2)  #+ last_value

                    optimizerA.zero_grad()
                    optimizerC.zero_grad()

                    critic_loss.backward()
                    actor_loss.backward()
                    loss.append(critic_loss)

                    optimizerA.step()
                    optimizerC.step()

                    critic_next_train.load_state_dict(
                        critic_train.state_dict())

                print(
                    "----------------------------------Net_Trained---------------------------------------"
                )
                print('--------------------------Iteration:', episode,
                      'over--------------------------------')
                episode += 1
                values = []
                rewards = []
                loss_sum = sum(loss).cpu().detach().numpy()
                total_loss.append(loss_sum)
                cross_loss_curve(loss_sum.squeeze(0), total_reward,
                                 save_curve_pic, save_critic_loss, save_reward,
                                 np.mean(average_speed), save_speed,
                                 average_speed_NPC, save_NPC_speed)
                #total_loss,total_rewards#np.mean(average_speed)/10
                loss = []
                average_speed = []
                memory.clear_memory()

                torch.save(actor_train.state_dict(), actor_train_path)
                torch.save(critic_train.state_dict(), critic_train_path)

                if episode > training_stage:  #50 100
                    try:
                        lr = sample_lr[int(episode // training_stage)] * 0.01
                    except (IndexError):
                        lr = 0.000001 * 0.9  #* (0.9 ** ((episode-1000) // 60))

                optimizerA = optim.Adam(actor_train.parameters(),
                                        lr,
                                        betas=(0.95, 0.999))
                optimizerC = optim.Adam(critic_train.parameters(),
                                        lr,
                                        betas=(0.95, 0.999))

            # first step of the episode
            if time_pass == 0:
                print('Iteration:', episode, "lr:", lr)
                state = np.reshape(state, (1, len(state)))
                state_img = generate_img_train()
                tensor_cv = torch.from_numpy(np.transpose(
                    state_img, (2, 0, 1))).double().to(device) / 255
                state = torch.DoubleTensor(state).reshape(
                    1, state_size).to(device)

                for _ in range(3):
                    memory.states.append(state)
                    memory.states_img.append(tensor_cv)

                state = torch.stack(memory.states).to(device).detach()  ###
                tensor_cv = torch.stack(memory.states_img).to(device).detach()
                value, h_state_cv_c, h_state_n_c, h_state_3_c = critic_train(
                    state,
                    tensor_cv)  #dist,  # now is a tensor; action = dist.sample()
                action, log_prob = actor_train(state, tensor_cv)
                print("acceleration: ", action.cpu().numpy())
                send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])

        #agents
        if A_T == 1:
            state = [
                torch.DoubleTensor(elem).reshape(1, state_size).to(device)
                for elem in state
            ]
            state = torch.stack(state).to(device).detach()
            tensor_cv_MAS = generate_img()
            tensor_cv_MAS = [
                torch.from_numpy(np.transpose(elem,
                                              (2, 0, 1))).double().to(device) /
                255 for elem in tensor_cv_MAS
            ]
            tensor_cv_MAS = torch.stack(tensor_cv_MAS).to(device).detach()

            action, _ = actor(state, tensor_cv_MAS)

            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])

        A_T, state, reward, done, time_pass, over, average_speed_NPC = GAMA_connect(
        )

    return None
Example #8
def main():

    ############## Hyperparameters ##############
    update_timestep = 1  #TD use == 1 # update policy every n timesteps  set for TD
    K_epochs = 4  # update policy for K epochs; too large an lr can produce NaN?
    eps_clip = 0.2
    gamma = 0.9

    episode = 512
    sample_lr = [
        0.0001, 0.00009, 0.00008, 0.00007, 0.00006, 0.00005, 0.00004, 0.00003,
        0.00002, 0.00001, 0.000009, 0.000008, 0.000007, 0.000006, 0.000005,
        0.000004, 0.000003, 0.000002, 0.000001
    ]
    lr = 0.0001  #random_seed = None
    state_dim = 5
    action_dim = 1
    #(self, state_dim, action_dim, lr, betas, gamma, K_epochs, eps_clip)
    actor_path = os.getcwd(
    ) + '/PPO_Mixedinput_Navigation_Model/weight/ppo_TD2lstm_actor.pkl'
    critic_path = os.getcwd(
    ) + '/PPO_Mixedinput_Navigation_Model/weight/ppo_TD2lstm_critic.pkl'
    ################ load ###################
    if episode > 50:  #50 100
        try:
            lr = sample_lr[int(episode // 50)]
        except (IndexError):
            lr = 0.000001

    ppo = PPO(state_dim, action_dim, lr, gamma, K_epochs, eps_clip)
    if os.path.exists(actor_path):
        ppo.actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    if os.path.exists(critic_path):
        ppo.critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    print("Waiting for GAMA...")

    ################### initialization ########################
    save_curve_pic = os.getcwd(
    ) + '/PPO_Mixedinput_Navigation_Model/result/PPO_2LSTM_loss_curve.png'
    save_critic_loss = os.getcwd(
    ) + '/PPO_Mixedinput_Navigation_Model/training_data/PPO_TD2_critic_loss.csv'
    save_reward = os.getcwd(
    ) + '/PPO_Mixedinput_Navigation_Model/training_data/PPO_TD2_reward.csv'
    reset()
    memory = Memory()

    advantages = 0  #global value
    loss = []
    total_loss = []
    rewards = []
    total_rewards = []
    test = "GAMA"
    state, reward, done, time_pass, over = GAMA_connect(test)  #connect
    #[real_speed/10, target_speed/10, elapsed_time_ratio, distance_left/100,distance_front_car/10,distance_behind_car/10,reward,done,over]
    print("done:", done, "timepass:"******"state ",state)
            rewards.append(reward)
            memory.rewards.append(reward)
            memory.is_terminals.append(done)
            state = torch.DoubleTensor(state).reshape(1, state_dim).to(device)
            state_img = generate_img()
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(device)
            if len(memory.states_next) == 0:
                for _ in range(3):
                    memory.states_next = memory.states
                    memory.states_next[2] = state
                    memory.states_img_next = memory.states_img
                    memory.states_img_next[2] = tensor_cv
            else:
                del memory.states_next[:1]
                del memory.states_img_next[:1]
                memory.states_next.append(state)
                memory.states_img_next.append(tensor_cv)
            loss_ = ppo.update(memory, lr, advantages, done)
            loss.append(loss_)
            del memory.logprobs[:]
            del memory.rewards[:]
            del memory.is_terminals[:]
            #memory.clear_memory()

            action = ppo.select_action(state, tensor_cv, memory)
            send_to_GAMA([[1, float(action * 10)]])
            #print("acceleration ",float(action))

        # episode finished
        elif done == 1:
            # send the action first, then compute the update
            print("state_last", state)
            send_to_GAMA([[1, 0]])
            rewards.append(reward)

            del memory.states_next[:1]
            del memory.states_img_next[:1]
            state = torch.DoubleTensor(state).reshape(1, state_dim).to(
                device)  # reshape into a single row
            memory.states_next.append(state)
            state_img = generate_img()
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(device)
            memory.states_img_next.append(tensor_cv)

            memory.rewards.append(reward)
            memory.is_terminals.append(done)
            loss_ = ppo.update(memory, lr, advantages, done)
            loss.append(loss_)
            memory.clear_memory()

            print(
                "----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1
            loss_sum = sum(loss).cpu().detach().numpy()
            total_loss.append(loss_sum)
            total_reward = sum(rewards)
            total_rewards.append(total_reward)
            cross_loss_curve(loss_sum.squeeze(0), total_reward, save_curve_pic,
                             save_critic_loss, save_reward)
            rewards = []
            loss = []
            if episode > 50:  #50 100
                try:
                    lr = sample_lr[int(episode // 50)]
                except (IndexError):
                    lr = 0.000001
            torch.save(ppo.actor.state_dict(), actor_path)
            torch.save(ppo.critic.state_dict(), critic_path)

        # first step of the episode
        else:
            print('Iteration:', episode, "lr:", lr)
            state = torch.DoubleTensor(state).reshape(1, state_dim).to(device)
            state_img = generate_img(
            )  # numpy image: H x W x C (500, 500, 3) -> (3,500,500)
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(
                    device
                )  # np.transpose( xxx,  (2, 0, 1)) torch image: C x H x W
            action = ppo.select_action(state, tensor_cv, memory)
            print("acceleration: ", action)
            send_to_GAMA([[1, float(action * 10)]])

        state, reward, done, time_pass, over = GAMA_connect(test)

    return None
Example #9
def main():
    ################ load ###################
    actor_path = os.path.abspath(
        os.curdir
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\weight\\AC_TD_actor.pkl'
    critic_path = os.path.abspath(
        os.curdir
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\weight\\AC_TD_critic.pkl'
    if os.path.exists(actor_path):
        actor = Actor(state_size, action_size).to(device)
        actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    else:
        actor = Actor(state_size, action_size).to(device)
    if os.path.exists(critic_path):
        critic = Critic(state_size, action_size).to(device)
        critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    else:
        critic = Critic(state_size, action_size).to(device)
    critic_next = Critic(state_size, action_size).to(device)
    critic_next.load_state_dict(critic.state_dict())
    print("Waiting for GAMA...")
    ################### initialization ########################
    reset()

    episode = 237

    lr = 0.0001
    if episode > 50:
        lr = 0.00008
        new_lr = lr * (0.9**((episode - 40) // 10))
        #if episode > 110:
        #   lr = 0.000008
        #  new_lr = lr * (0.9 ** ((episode-90) // 10)) #40

    optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))
    optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))

    test = "GAMA"
    state, reward, done, time_pass, over = GAMA_connect(test)  #connect
    print("done:", done, "timepass:"******"restart acceleration: 0")
            send_to_GAMA([[1, 0]])
            # send the action first, then compute the update
            print(state)
            rewards.append(reward)  #contains the last
            reward = torch.tensor([reward], dtype=torch.float, device=device)
            rewards.append(reward)  #contains the last
            total_reward = sum(rewards).cpu().detach().numpy()
            total_rewards.append(total_reward)

            #state = torch.FloatTensor(state).reshape(1,4).to(device)
            #last_value= critic(state)

            with torch.autograd.set_detect_anomaly(True):
                advantage = reward.detach(
                ) - value  #+ last_value   V(s+1) = 0 at the final step
                actor_loss = -(log_prob * advantage.detach())
                critic_loss = (reward.detach() - value).pow(2)  #+ last_value
                lstm_loss = critic_loss

                optimizerA.zero_grad()
                optimizerC.zero_grad()

                critic_loss.backward()
                actor_loss.backward()
                loss.append(critic_loss)

                optimizerA.step()
                optimizerC.step()

                critic_next.load_state_dict(critic.state_dict())

            print(
                "----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1
            log_probs = []
            values = []
            rewards = []
            masks = []
            loss_sum = sum(loss).cpu().detach().numpy()
            total_loss.append(loss_sum)
            cross_loss_curve(loss_sum.squeeze(0), total_reward, save_curve_pic,
                             save_critic_loss,
                             save_reward)  #total_loss,total_rewards
            loss = []
            if episode > 50:
                lr = 0.00008
                new_lr = lr * (0.9**((episode - 40) // 10))
                #if episode > 110:
                #   lr = 0.000008
                #  new_lr = lr * (0.9 ** ((episode-90) // 10)) #40
                optimizerA = optim.Adam(actor.parameters(),
                                        new_lr,
                                        betas=(0.95, 0.999))
                optimizerC = optim.Adam(critic.parameters(),
                                        new_lr,
                                        betas=(0.95, 0.999))

            torch.save(actor.state_dict(), actor_path)
            torch.save(critic.state_dict(), critic_path)

        # first step of the episode
        else:
            print('Iteration:', episode)
            state = np.reshape(state, (1, len(state)))  #xxx
            state_img = generate_img()
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(device)
            state = torch.DoubleTensor(state).reshape(1, 6).to(device)
            value = critic(
                state,
                tensor_cv)  #dist,  # now is a tensor; action = dist.sample()
            action, log_prob, entropy = actor(state, tensor_cv)
            print("acceleration: ", action.cpu().numpy())
            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])
            log_prob = log_prob.unsqueeze(0)
            entropy += entropy

        state, reward, done, time_pass, over = GAMA_connect(test)
    return None
Example #10
def main():

    ############## Hyperparameters ##############
    update_timestep = 1  #TD use == 1 # update policy every n timesteps  set for TD
    K_epochs = 2  # update policy for K epochs; too large an lr can produce NaN?
    eps_clip = 0.2
    gamma = 0.9

    episode = 376

    lr_first = 0.0001
    lr = lr_first  #random_seed = None
    state_dim = 6
    action_dim = 1
    #(self, state_dim, action_dim, lr, betas, gamma, K_epochs, eps_clip)
    actor_path = os.getcwd(
    ) + '\\GAMA_python\\PPO_Navigation_Model\\weight\\ppo_TD_actor.pkl'
    critic_path = os.getcwd(
    ) + '\\GAMA_python\\PPO_Navigation_Model\\weight\\ppo_TD_critic.pkl'
    ################ load ###################
    if episode > 70:  #50 100
        lr_first = 0.00001
        lr = lr_first * (0.65**((episode - 60) // 10))
    ppo = PPO(state_dim, action_dim, lr, gamma, K_epochs, eps_clip)
    if os.path.exists(actor_path):
        ppo.actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    if os.path.exists(critic_path):
        ppo.critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    print("Waiting for GAMA...")

    ################### initialization ########################
    save_curve_pic = os.getcwd(
    ) + '\\GAMA_python\\PPO_Navigation_Model\\result\\PPO_TD_loss_curve.png'
    save_critic_loss = os.getcwd(
    ) + '\\GAMA_python\\PPO_Navigation_Model\\training_data\\PPO_TD_critic_loss.csv'
    save_reward = os.getcwd(
    ) + '\\GAMA_python\\PPO_Navigation_Model\\training_data\\PPO_TD_reward.csv'
    reset()
    memory = Memory()

    advantages = 0  #global value
    loss = []
    total_loss = []
    rewards = []
    total_rewards = []
    test = "GAMA"
    state, reward, done, time_pass, over = GAMA_connect(test)  #connect
    #[real_speed/10, target_speed/10, elapsed_time_ratio, distance_left/100,distance_front_car/10,distance_behind_car/10,reward,done,over]
    print("done:", done, "timepass:"******"state_last", state)
            send_to_GAMA([[1, 0]])
            rewards.append(reward)

            state = torch.DoubleTensor(state).reshape(1, 6).to(device)  # reshape into a single row
            memory.states_next.append(state)
            memory.rewards.append(reward)
            memory.is_terminals.append(done)
            loss_ = ppo.update(memory, lr, advantages, done)
            loss.append(loss_)
            memory.clear_memory()

            print(
                "----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1
            loss_sum = sum(loss).cpu().detach().numpy()
            total_loss.append(loss_sum)
            total_reward = sum(rewards)
            total_rewards.append(total_reward)
            cross_loss_curve(loss_sum.squeeze(0), total_reward, save_curve_pic,
                             save_critic_loss, save_reward)
            rewards = []
            loss = []
            if episode > 70:  #50 100
                lr_first = 0.00001
                lr = lr_first * (0.65**((episode - 60) // 10))  #40 90
            torch.save(ppo.actor.state_dict(), actor_path)
            torch.save(ppo.critic.state_dict(), critic_path)

        # first step of the episode
        else:
            print('Iteration:', episode)
            state = torch.DoubleTensor(state).reshape(1, 6).to(device)
            action = ppo.select_action(state, memory)
            print("acceleration: ", action)  #.cpu().numpy()
            send_to_GAMA([[1, float(action * 10)]])

        state, reward, done, time_pass, over = GAMA_connect(test)
    return None
Example #11
def main():
    ################ load ###################
    actor_path = os.path.abspath(
        os.curdir) + '/PPO_Mixedinput_Navigation_Model/weight/AC_TD3_actor.pkl'
    critic_path = os.path.abspath(
        os.curdir
    ) + '/PPO_Mixedinput_Navigation_Model/weight/AC_TD3_critic.pkl'
    if os.path.exists(actor_path):
        actor = Actor(state_size, action_size).to(device)
        actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    else:
        actor = Actor(state_size, action_size).to(device)
    if os.path.exists(critic_path):
        critic = Critic(state_size, action_size).to(device)
        critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    else:
        critic = Critic(state_size, action_size).to(device)
    critic_next = Critic(state_size, action_size).to(device)
    critic_next.load_state_dict(critic.state_dict())
    print("Waiting for GAMA...")
    ################### initialization ########################
    reset()

    episode = 4000
    training_stage = 70  #100#80
    Decay = training_stage * 18

    lr = 0.0001
    sample_lr = [
        0.0001, 0.00009, 0.00008, 0.00007, 0.00006, 0.00005, 0.00004, 0.00003,
        0.00002, 0.00001, 0.000009, 0.000008, 0.000007, 0.000006, 0.000005,
        0.000004, 0.000003, 0.000002, 0.000001
    ]  #900 960 1020 1080 1140
    if episode >= training_stage:  #50 100
        try:
            lr = sample_lr[int(episode // training_stage)]
        except (IndexError):
            lr = 0.000001 * (0.9**((episode - Decay // training_stage))
                             )  #100-1800#80-1440#65-1170#570 -- 30

    optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))
    optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))

    test = "GAMA"
    state, reward, done, time_pass, over, _ = GAMA_connect(test)  #connect
    print("done:", done, "timepass:"******"----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1

        # first step of the episode
        else:
            print('Iteration:', episode, "lr:", lr)
            state = np.reshape(state, (1, len(state)))  #xxx
            state_img = generate_img()
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(device) / 255
            state = torch.DoubleTensor(state).reshape(1, state_size).to(device)

            for _ in range(Memory_size):
                memory.states.append(state)
                memory.states_img.append(tensor_cv)
            state = torch.stack(memory.states).to(device).detach()  ###
            tensor_cv = torch.stack(memory.states_img).to(device).detach()
            value, h_state_cv_c, h_state_n_c, h_state_3_c = critic(
                state,
                tensor_cv)  #dist,  # now is a tensor; action = dist.sample()
            action, log_prob, entropy = actor(
                state, tensor_cv)  #, h_state_cv_a,h_state_n_a,h_state_3_a
            print("acceleration: ", action.cpu().numpy())
            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])
            log_prob = log_prob.unsqueeze(0)
            #entropy += entropy

        state, reward, done, time_pass, over, average_speed_NPC = GAMA_connect(
            test)
    return None
Example #12
    def __init__(self):
        log.debug('Full sys.argv received: %s' % argv)
        # Parse parameters
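        # argv[2] carries the plugin URL's query string (e.g. "?mode=play&id=..."),
        # so the leading "?" is stripped before parsing it into a dict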
        params = dict(parse_qsl(argv[2][1:]))
        mode = params.get('mode', '')
        itemid = params.get('id', '')

        if mode == 'play':
            self.play()

        elif mode == 'plex_node':
            self.play()

        elif mode == 'ondeck':
            entrypoint.getOnDeck(itemid,
                                 params.get('type'),
                                 params.get('tagname'),
                                 int(params.get('limit')))

        elif mode == 'recentepisodes':
            entrypoint.getRecentEpisodes(itemid,
                                         params.get('type'),
                                         params.get('tagname'),
                                         int(params.get('limit')))

        elif mode == 'nextup':
            entrypoint.getNextUpEpisodes(params['tagname'],
                                         int(params['limit']))

        elif mode == 'inprogressepisodes':
            entrypoint.getInProgressEpisodes(params['tagname'],
                                             int(params['limit']))

        elif mode == 'browseplex':
            entrypoint.browse_plex(key=params.get('key'),
                                   plex_section_id=params.get('id'))

        elif mode == 'getsubfolders':
            entrypoint.GetSubFolders(itemid)

        elif mode == 'watchlater':
            entrypoint.watchlater()

        elif mode == 'channels':
            entrypoint.channels()

        elif mode == 'settings':
            executebuiltin('Addon.OpenSettings(%s)' % v.ADDON_ID)

        elif mode == 'enterPMS':
            entrypoint.enterPMS()

        elif mode == 'reset':
            reset()

        elif mode == 'togglePlexTV':
            entrypoint.togglePlexTV()

        elif mode == 'resetauth':
            entrypoint.resetAuth()

        elif mode == 'passwords':
            passwordsXML()

        elif mode == 'switchuser':
            entrypoint.switchPlexUser()

        elif mode in ('manualsync', 'repair'):
            if window('plex_online') != 'true':
                # Server is not online, do not run the sync
                dialog('ok',
                       heading=lang(29999),
                       message=lang(39205))
                log.error('Not connected to a PMS.')
            else:
                if mode == 'repair':
                    window('plex_runLibScan', value='repair')
                    log.info('Requesting repair lib sync')
                elif mode == 'manualsync':
                    log.info('Requesting full library scan')
                    window('plex_runLibScan', value='full')

        elif mode == 'texturecache':
            window('plex_runLibScan', value='del_textures')

        elif mode == 'chooseServer':
            entrypoint.chooseServer()

        elif mode == 'refreshplaylist':
            log.info('Requesting playlist/nodes refresh')
            window('plex_runLibScan', value='views')

        elif mode == 'deviceid':
            self.deviceid()

        elif mode == 'fanart':
            log.info('User requested fanarttv refresh')
            window('plex_runLibScan', value='fanart')

        elif '/extrafanart' in argv[0]:
            plexpath = argv[2][1:]
            plexid = itemid
            entrypoint.getExtraFanArt(plexid, plexpath)
            entrypoint.getVideoFiles(plexid, plexpath)

        # Called by e.g. 3rd party plugin video extras
        elif ('/Extras' in argv[0] or '/VideoFiles' in argv[0] or
                '/Extras' in argv[2]):
            plexId = itemid or None
            entrypoint.getVideoFiles(plexId, params)

        else:
            entrypoint.doMainListing(content_type=params.get('content_type'))
Example #13
def main():
    ############## Hyperparameters ##############
    K_epochs = 3  # update policy for K epochs; too large an lr can produce NaN?
    eps_clip = 0.2
    gamma = 0.9  # should be fairly weak; a stronger coupling also penalizes every correct step?

    episode = 3

    lr_first = 0.00001
    lr = lr_first  #random_seed = None
    state_dim = 6
    action_dim = 1
    #(self, state_dim, action_dim, lr, betas, gamma, K_epochs, eps_clip)
    actor_path = os.getcwd(
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\weight\\ppo_MC_actor.pkl'
    critic_path = os.getcwd(
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\weight\\ppo_MC_critic.pkl'
    ################ load ###################
    if episode > 30:  #50 100
        lr_first = 0.00001
        lr = lr_first * (0.7**((episode - 20) // 10))
    ppo = PPO(state_dim, action_dim, lr, gamma, K_epochs, eps_clip)
    if os.path.exists(actor_path):
        ppo.actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    if os.path.exists(critic_path):
        ppo.critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    print("Waiting for GAMA...")

    ################### initialization ########################
    save_curve_pic = os.getcwd(
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\result\\PPO_MC_loss_curve.png'
    save_critic_loss = os.getcwd(
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\training_data\\PPO_MC_critic_loss.csv'
    save_reward = os.getcwd(
    ) + '\\GAMA_python\\PPO_Mixedinput_Navigation_Model\\training_data\\PPO_MC_reward.csv'
    reset()
    memory = Memory()

    advantages = 0  #global value
    loss = []
    total_loss = []
    rewards = []
    total_rewards = []
    test = "GAMA"
    state, reward, done, time_pass, over = GAMA_connect(test)  #connect
    #[real_speed/10, target_speed/10, elapsed_time_ratio, distance_left/100,distance_front_car/10,distance_behind_car/10,reward,done,over]
    print("done:", done, "timepass:"******"----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1
            loss_sum = sum(loss).cpu().detach().numpy()
            total_loss.append(loss_sum)
            total_reward = sum(rewards)
            total_rewards.append(total_reward)
            cross_loss_curve(loss_sum.squeeze(0), total_reward, save_curve_pic,
                             save_critic_loss, save_reward)
            rewards = []
            loss = []
            if episode > 30:  #50 100
                lr = lr_first * (0.94**((episode - 20) // 10))
                #if episode > 80:
                #   lr_first = 0.0001
                #  lr = lr_first * (0.94 ** ((episode-70) // 10))
            torch.save(ppo.actor.state_dict(), actor_path)
            torch.save(ppo.critic.state_dict(), critic_path)

        # first step of the episode
        else:
            print('Iteration:', episode)
            state = torch.DoubleTensor(state).reshape(1, 6).to(device)
            state_img = generate_img(
            )  # numpy image: H x W x C (500, 500, 3) -> (3,500,500)
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(
                    device
                )  # np.transpose( xxx,  (2, 0, 1)) torch image: C x H x W
            action = ppo.select_action(state, tensor_cv, memory)
            print("acceleration: ", action)  #.cpu().numpy()
            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])

        state, reward, done, time_pass, over = GAMA_connect(test)
    return None
Example #14
    DATA = list

    print list2

    for i, row in enumerate(DATA):
        for j, col in enumerate(row):
            booksheet.write(j, i, col)
    # workbook.save('./data/dos_tichu_2500_1250_625_xlst.xls')

    return [experiment] + np.mean(results, axis=0).tolist()


if __name__ == "__main__":

    reset()

    arguments = docopt(__doc__)

    pd.set_option("display.expand_frame_repr", False)

    pheno_path = "./data/phenotypes/Phenotypic_V1_0b_preprocessed949.csv"
    pheno = load_phenotypes(pheno_path)

    hdf5 = hdf5_handler("./data/abide_cc200_tichu.hdf5", "a")

    valid_derivatives = ["cc200", "aal", "ez", "ho", "tt", "dosenbach160"]
    derivatives = [
        derivative for derivative in arguments["<derivative>"]
        if derivative in valid_derivatives
    ]
Example #15
def main(mac):
    cont = False
    next_ring = False
    # initialize scan/connection flags so the later checks cannot hit a NameError
    next_scan = False
    next_conn = False
    utils.debug('\n\n******TDB TEST******\n')

    # Starts TDB server
    utils.debug('Starting TDB server')
    tdb.start_tdb_server(os.environ['BT_PATH'], os.environ['TDB_PATH'])
    time.sleep(0.05)

    try:
        # Run connect first as a workaround to make tdb work
        tdb.connect(mac)
        time.sleep(0.05)

        tdb_res = 0
        fail_str = ''
        pass_str = ''

        out_scan = tdb.start_scan()
        time.sleep(2)
        if 'ERROR' in out_scan:
            next_scan = False
            utils.debug('Error while enabling scan')
            fail_str += '\nenable-scan error'
        else:
            if ('set scan enabled' in out_scan):
                next_scan = True
                utils.debug('successfully enabled scan')
                tdb_res += 1
                pass_str += '\nenable-scan pass'
            else:
                next_scan = False
                utils.debug('failed to enable scan')
                fail_str += '\nenable-scan failed'

        if next_scan is True:
            out_conn = tdb.connect(mac)
            time.sleep(0.05)
            if 'ERROR' in out_conn:
                utils.debug('Error happens when connecting to tdb')
                next_conn = False
            else:
                if ('error' in out_conn) or ('connection to tdb server '
                                             'failed' in out_conn):
                    utils.debug('connection to tdb server failed')
                    next_conn = False
                elif ('connection open!' in out_conn):
                    utils.debug('successfully connected to tdb server')
                    next_conn = True
                    tdb_res += 1
                    pass_str += '\ntdb connect pass'

        if next_conn is False:
            for i in range(retries):
                try:
                    utils.debug('Loop: ' + str(i + 1))
                    utils.debug('Resetting DUT ...')
                    utils.reset(os.environ['nrfjprog_PATH'])
                    time.sleep(5)
                    tdb.start_tdb_server(os.environ['BT_PATH'],
                                         os.environ['TDB_PATH'])
                    tdb.connect(mac)
                    time.sleep(0.05)
                    tdb.start_scan()
                    time.sleep(2)
                    out_conn = tdb.connect(mac)
                    time.sleep(0.05)
                    if ('connection open!' in out_conn):
                        utils.debug('successfully connected to tdb server '
                                    'after ' + str(i + 1) + ' retries')
                        next_conn = True
                        tdb_res += 1
                        pass_str += '\ntdb connect pass'
                        break
                except (subprocess.CalledProcessError) as detail:
                    traceback.format_exc()
                    print detail
                    utils.debug(str(detail))
                    if i < 2:
                        continue
                    else:
                        raise

        if next_conn is False:
            utils.debug('Failed to connect to tdb server after ' +
                        str(retries) + ' retries')
            fail_str += '\ntdb connect failed'
        else:
            out_stop_scan = tdb.stop_scan()
            time.sleep(0.05)
            if 'ERROR' in out_stop_scan:
                utils.debug('Error while disabling scan')
                fail_str += '\ntdb disable-scan failed'
            else:
                tdb_res += 1
                pass_str += '\ntdb disable-scan pass'

            out_onb_start = tdb.onboard_start(mac)
            time.sleep(0.05)
            if 'ERROR' in out_onb_start:
                utils.debug('onboard-start error')
                cont = False
                fail_str += '\ntdb onboard-start error'
            else:
                if 'received response' in out_onb_start:
                    cont = True
                    utils.debug('onboard start OK')
                    tdb_res += 1
                    pass_str += '\ntdb onboard-start pass'
                else:
                    cont = False
                    utils.debug('Failed to start onboarding')
                    fail_str += '\nonboard-start failed'

        if cont is True:
            out_onb_conf = tdb.onboard_confirm(mac)
            time.sleep(0.05)
            if 'ERROR' in out_onb_conf:
                cont = False
                utils.debug('onboard confirm error')
                fail_str += '\ntdb onboard-confirm error'
            else:
                if ('Tag device id' in out_onb_conf):
                    cont = True
                    utils.debug('onboard confirm OK')
                    tdb_res += 1
                    pass_str += '\ntdb onboard-confirm pass'
                else:
                    cont = False
                    utils.debug('Failed to onboard confirm')
                    fail_str += '\ntdb onboard-confirm failed'

        if cont is True:
            out_onb_cancel = tdb.onboard_cancel(mac)
            time.sleep(0.05)
            if 'ERROR' in out_onb_cancel:
                cont = False
                utils.debug('Off onboarding error')
                fail_str += '\ntdb onboard-cancel error'
            else:
                if ('received response' in out_onb_cancel):
                    cont = True
                    utils.debug('off boarding OK')
                    tdb_res += 1
                    pass_str += '\ntdb onboard-cancel pass'
                else:
                    cont = False
                    utils.debug('off boarding failed')
                    fail_str += '\ntdb onboard-cancel failed'

        if next_conn is True:
            out_ble_param = tdb.ble_param(mac, ble_param)
            time.sleep(0.05)
            if 'ERROR' in out_ble_param:
                utils.debug('Error when setting ble param')
                fail_str += '\ntdb ble-param error'
            else:
                if ('error code: 0' in out_ble_param):
                    utils.debug('successfully setting ble param')
                    tdb_res += 1
                    pass_str += '\ntdb ble-param pass'
                else:
                    utils.debug('failed to set ble_param')
                    fail_str += '\ntdb ble-param failed'

        if next_conn is True:
            out_ping = tdb.ping(mac, ping_msg)
            time.sleep(0.05)
            if 'ERROR' in out_ping:
                utils.debug('Error when pinging')
                fail_str += '\ntdb ping error'
            else:
                if ('received pong msg="Ping!"' in out_ping):
                    utils.debug('ping OK')
                    tdb_res += 1
                    pass_str += '\ntdb ping pass'
                else:
                    utils.debug('ping failed')
                    fail_str += '\ntdb ping failed'

        if next_conn is True:
            out_ring = tdb.ring_start(mac)
            time.sleep(0.05)
            if 'ERROR' in out_ring:
                utils.debug('Error when ringing')
                next_ring = False
                fail_str += '\ntdb ring error'
            else:
                if ('Ring indication: error code: 0' in out_ring):
                    utils.debug('Ring OK')
                    next_ring = True
                    tdb_res += 1
                    pass_str += '\ntdb ring pass'
                else:
                    utils.debug('Ring failed')
                    next_ring = False
                    fail_str += '\ntdb ring failed'

        if next_ring is True:
            out_ring_stop = tdb.ring_stop(mac)
            time.sleep(0.05)
            if 'ERROR' in out_ring_stop:
                utils.debug('ring-stop error')
                fail_str += '\ntdb ring-stop error'
            else:
                if ('Ring indication: error code: 0' in out_ring_stop):
                    utils.debug('ring-stop OK')
                    tdb_res += 1
                    pass_str += '\ntdb ring-stop pass'
                else:
                    utils.debug('failed to stop ringing')
                    fail_str += '\ntdb ring-stop failed'
    except subprocess.CalledProcessError as err:
        traceback.format_exc()
        utils.debug(str(err))
        print err
    except KeyboardInterrupt:
        print '\nAborted by user\n'
        tdb.kill_tdb_server()
        time.sleep(0.05)
    finally:
        tdb.kill_tdb_server()
        time.sleep(0.05)

    if tdb_res == 10:
        utils.debug('All tdb commands PASS\n\n')
        utils.test_report('\n\nTDB test result:')
        utils.test_report('\nAll tdb commands PASS')
    else:
        utils.test_report('\n\nTDB test results: ')
        utils.test_report(pass_str + '\n')
        utils.test_report(fail_str + '\n\n')
Example #16
from packet     import Packet
from random     import randint
from matplotlib import pyplot as plt
from utils      import reset, count_wins, report
from globals    import networks, SIZE, NTRIALS, PTIMES, STRATEGIES

NTIME_STEPS = 200
NSERVERS = SIZE**2

win_freqs = {strategy : [] for strategy in STRATEGIES}

if __name__ == '__main__':
  # Run SLB model simulation
  for ptime in PTIMES:
    for i in xrange(NTRIALS):
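      # start each trial from a clean slate; reset(networks) presumably re-initializes
      # every simulated network before packets are injected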
      reset(networks)
      for j in xrange(NTIME_STEPS):
        loc = randint(0, NSERVERS - 1)
        pkt = Packet('id-%d' % (j), ptime)
        for sim_obj in networks:
          network = sim_obj['obj']
          network.step()
          network.add(pkt, loc)
          sim_obj['dat'][ptime][i] += network.latency()

  # Count and report win frequencies
  for ptime in PTIMES:
    win_counts = {strategy : 0 for strategy in STRATEGIES}
    count_wins(networks, ptime, win_counts, NTRIALS)
    report(win_counts, ptime, NTRIALS)
    for strategy, count in win_counts.iteritems():
Example #17
    def run_internal(self):

        startupComplete = False
        monitor = self.monitor

        self.logMsg("---===### Starting LibrarySync ###===---", 0)

        while not monitor.abortRequested():

            # In the event the server goes offline
            while self.suspend_thread:
                # Set in service.py
                if monitor.waitForAbort(5):
                    # Abort was requested while waiting. We should exit
                    break

            if (utils.window('emby_dbCheck') != "true" and
                    utils.settings('SyncInstallRunDone') == "true"):
                
                # Verify the validity of the database
                currentVersion = utils.settings('dbCreatedWithVersion')
                minVersion = utils.window('emby_minDBVersion')
                uptoDate = self.compareDBVersion(currentVersion, minVersion)

                if not uptoDate:
                    self.logMsg(
                        "Db version out of date: %s minimum version required: %s"
                        % (currentVersion, minVersion), 0)
                    
                    resp = xbmcgui.Dialog().yesno(
                                            heading="Db Version",
                                            line1=(
                                                "Detected the database needs to be "
                                                "recreated for this version of Emby for Kodi. "
                                                "Proceed?"))
                    if not resp:
                        self.logMsg("Db version out of date! USER IGNORED!", 0)
                        xbmcgui.Dialog().ok(
                                        heading="Emby for Kodi",
                                        line1=(
                                            "Emby for Kodi may not work correctly "
                                            "until the database is reset."))
                    else:
                        utils.reset()

                utils.window('emby_dbCheck', value="true")


            if not startupComplete:
                # Verify the video database can be found
                videoDb = utils.getKodiVideoDBPath()
                if not xbmcvfs.exists(videoDb):
                    # Database does not exist
                    self.logMsg(
                            "The current Kodi version is incompatible "
                            "with the Emby for Kodi add-on. Please visit "
                            "https://github.com/MediaBrowser/Emby.Kodi/wiki "
                            "to know which Kodi versions are supported.", 0)

                    xbmcgui.Dialog().ok(
                                    heading="Emby Warning",
                                    line1=(
                                        "Cancelling the database syncing process. "
                                        "Current Kodi versoin: %s is unsupported. "
                                        "Please verify your logs for more info."
                                        % xbmc.getInfoLabel('System.BuildVersion')))
                    break

                # Run start up sync
                self.logMsg("Db version: %s" % utils.settings('dbCreatedWithVersion'), 0)
                self.logMsg("SyncDatabase (started)", 1)
                startTime = datetime.now()
                librarySync = self.startSync()
                elapsedTime = datetime.now() - startTime
                self.logMsg(
                    "SyncDatabase (finished in: %s) %s"
                    % (str(elapsedTime).split('.')[0], librarySync), 1)
                # Only try the initial sync once per kodi session regardless
                # This will prevent an infinite loop in case something goes wrong.
                startupComplete = True

            # Process updates
            if utils.window('emby_dbScan') != "true":
                self.incrementalSync()

            if (utils.window('emby_onWake') == "true" and
                    utils.window('emby_online') == "true"):
                # Kodi is waking up
                # Set in kodimonitor.py
                utils.window('emby_onWake', clear=True)
                if utils.window('emby_syncRunning') != "true":
                    self.logMsg("SyncDatabase onWake (started)", 0)
                    librarySync = self.startSync()
                    self.logMsg("SyncDatabase onWake (finished) %s", librarySync, 0)

            if self.stop_thread:
                # Set in service.py
                self.logMsg("Service terminated thread.", 2)
                break

            if monitor.waitForAbort(1):
                # Abort was requested while waiting. We should exit
                break

        self.logMsg("###===--- LibrarySync Stopped ---===###", 0)
Example #18
File: nn.py  Project: zfyyfz12/AIMAFE
def run_autoencoder2(experiment,
                     X_train,
                     y_train,
                     X_valid,
                     y_valid,
                     X_test,
                     y_test,
                     model_path,
                     prev_model_path,
                     code_size=1250,
                     prev_code_size=2500):
    if os.path.isfile(model_path) or \
       os.path.isfile(model_path + ".meta"):
        return
    prev_model = ae(
        X_train.shape[1],
        prev_code_size,
        corruption=0.0,  # Disable corruption for conversion
        enc=tf.nn.tanh,
        dec=None)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        saver = tf.train.Saver(prev_model["params"],
                               write_version=tf.train.SaverDef.V2)
        if os.path.isfile(prev_model_path):
            saver.restore(sess, prev_model_path)
        X_train = sess.run(prev_model["encode"],
                           feed_dict={prev_model["input"]: X_train})
        X_valid = sess.run(prev_model["encode"],
                           feed_dict={prev_model["input"]: X_valid})
        X_test = sess.run(prev_model["encode"],
                          feed_dict={prev_model["input"]: X_test})
    del prev_model

    reset()

    learning_rate = 0.0001
    corruption = 0.9
    ae_enc = tf.nn.tanh
    ae_dec = None

    training_iters = 2000
    batch_size = 10
    n_classes = 2

    model = ae(prev_code_size,
               code_size,
               corruption=corruption,
               enc=ae_enc,
               dec=ae_dec)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        model["cost"])
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        saver = tf.train.Saver(model["params"],
                               write_version=tf.train.SaverDef.V2)
        prev_costs = np.array([9999999999] * 3)
        for epoch in range(training_iters):
            batches = range(len(X_train) / batch_size)
            costs = np.zeros((len(batches), 3))

            for ib in batches:
                from_i = ib * batch_size
                to_i = (ib + 1) * batch_size
                batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]
                _, cost_train = sess.run([optimizer, model["cost"]],
                                         feed_dict={model["input"]: batch_xs})
                cost_valid = sess.run(model["cost"],
                                      feed_dict={model["input"]: X_valid})
                cost_test = sess.run(model["cost"],
                                     feed_dict={model["input"]: X_test})
                costs[ib] = [cost_train, cost_valid, cost_test]
            costs = costs.mean(axis=0)
            cost_train, cost_valid, cost_test = costs
            print format_config(
                "Exp={experiment}, Model=ae2, Iter={epoch:5d}, Cost={cost_train:.6f} {cost_valid:.6f} {cost_test:.6f}",
                {
                    "experiment": experiment,
                    "epoch": epoch,
                    "cost_train": cost_train,
                    "cost_valid": cost_valid,
                    "cost_test": cost_test,
                }),
            if cost_valid < prev_costs[1]:
                print "Saving better model"
                saver.save(sess, model_path)
                prev_costs = costs
            else:
                print
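
The autoencoder stages above call a bare reset() between graph constructions. A plausible sketch of that helper, assuming it simply clears TensorFlow 1.x's default graph (the project's actual utils module is not shown here):

import tensorflow as tf

def reset():
    # Drop every node from the default graph so the next model is built from scratch.
    tf.reset_default_graph()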
예제 #19
0
파일: nn.py 프로젝트: zfyyfz12/AIMAFE
def run_nn(hdf5, experiment, code_size_1, code_size_2, code_size_3):

    exp_storage = hdf5["experiments"]["cc200_whole"]
    #exp_storage = hdf5["experiments"]["aal_whole"]
    #exp_storage = hdf5["experiments"]["dosenbach160_whole"]

    for fold in exp_storage:

        experiment_cv = format_config("{experiment}_{fold}", {
            "experiment": experiment,
            "fold": fold,
        })

        X_train, y_train, \
        X_valid, y_valid, \
        X_test, y_test,test_pid = load_fold(hdf5["patients"], exp_storage, fold)

        ae1_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-1.ckpt",
            {
                "experiment": experiment_cv,
            })
        ae2_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-2.ckpt",
            {
                "experiment": experiment_cv,
            })

        ae3_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-3.ckpt",
            {
                "experiment": experiment_cv,
            })
        nn_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_mlp.ckpt", {
                "experiment": experiment_cv,
            })

        #         ae1_model_path = format_config("./data/aal_tichu_2500_1250_625/{experiment}_autoencoder-1.ckpt", {
        #             "experiment": experiment_cv,
        #         })
        #         ae2_model_path = format_config("./data/aal_tichu_2500_1250_625/{experiment}_autoencoder-2.ckpt", {
        #             "experiment": experiment_cv,
        #         })

        #         ae3_model_path = format_config("./data/aal_tichu_2500_1250_625/{experiment}_autoencoder-3.ckpt", {
        #             "experiment": experiment_cv,
        #         })
        #         nn_model_path = format_config("./data/aal_tichu_2500_1250_625/{experiment}_mlp.ckpt", {
        #             "experiment": experiment_cv,
        #         })

        #         ae1_model_path = format_config("./data/dosenbach160_tichu_2500_1250_625/{experiment}_autoencoder-1.ckpt", {
        #             "experiment": experiment_cv,
        #         })
        #         ae2_model_path = format_config("./data/dosenbach160_tichu_2500_1250_625/{experiment}_autoencoder-2.ckpt", {
        #             "experiment": experiment_cv,
        #         })

        #         ae3_model_path = format_config("./data/dosenbach160_tichu_2500_1250_625/{experiment}_autoencoder-3.ckpt", {
        #             "experiment": experiment_cv,
        #         })
        #         nn_model_path = format_config("./data/dosenbach160_tichu_2500_1250_625/{experiment}_mlp.ckpt", {
        #             "experiment": experiment_cv,
        #         })

        reset()

        # Run first autoencoder
        run_autoencoder1(experiment_cv,
                         X_train,
                         y_train,
                         X_valid,
                         y_valid,
                         X_test,
                         y_test,
                         model_path=ae1_model_path,
                         code_size=code_size_1)

        reset()

        # Run second autoencoder
        run_autoencoder2(experiment_cv,
                         X_train,
                         y_train,
                         X_valid,
                         y_valid,
                         X_test,
                         y_test,
                         model_path=ae2_model_path,
                         prev_model_path=ae1_model_path,
                         prev_code_size=code_size_1,
                         code_size=code_size_2)

        reset()

        run_autoencoder3(experiment_cv,
                         X_train,
                         y_train,
                         X_valid,
                         y_valid,
                         X_test,
                         y_test,
                         model_path=ae3_model_path,
                         prev_model_path=ae2_model_path,
                         prev_code_size=code_size_2,
                         code_size=code_size_3)

        reset()

        # Run multilayer NN with pre-trained autoencoders
        run_finetuning(experiment_cv,
                       X_train,
                       y_train,
                       X_valid,
                       y_valid,
                       X_test,
                       y_test,
                       model_path=nn_model_path,
                       prev_model_1_path=ae1_model_path,
                       prev_model_2_path=ae2_model_path,
                       prev_model_3_path=ae3_model_path,
                       code_size_1=code_size_1,
                       code_size_2=code_size_2,
                       code_size_3=code_size_3)
예제 #20
0
    def reset_parameters(self):
        reset(self.nn)
        uniform(self.in_channels, self.root)
        uniform(self.in_channels, self.bias)
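
The reset() and uniform() helpers used here resemble PyTorch Geometric's initializer utilities. A rough sketch under that assumption (not necessarily this project's exact code):

import math

def uniform(size, tensor):
    # Initialize `tensor` uniformly in [-1/sqrt(size), 1/sqrt(size)].
    if tensor is not None:
        bound = 1.0 / math.sqrt(size)
        tensor.data.uniform_(-bound, bound)

def reset(nn):
    # Recursively call reset_parameters() on a module or on each of its children.
    def _reset(item):
        if hasattr(item, 'reset_parameters'):
            item.reset_parameters()

    if nn is not None:
        if hasattr(nn, 'children') and len(list(nn.children())) > 0:
            for item in nn.children():
                _reset(item)
        else:
            _reset(nn)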
예제 #21
0
def update_likes(bot, update):
    utils.reset()
    update.message.reply_text('Успешно')
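
The handler above uses the pre-v12 python-telegram-bot callback signature (bot, update). For illustration only (the token and command name are made up), it would be wired up roughly like this:

from telegram.ext import Updater, CommandHandler

updater = Updater(token='<BOT_TOKEN>')  # hypothetical token
updater.dispatcher.add_handler(CommandHandler('reset', update_likes))
updater.start_polling()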
예제 #22
0
    def run_internal(self):

        dialog = xbmcgui.Dialog()

        startupComplete = False

        log.warn("---===### Starting LibrarySync ###===---")

        # Verify database structure, otherwise create it.
        self._verify_emby_database()

        while not self.monitor.abortRequested():

            # In the event the server goes offline
            while self.suspend_thread:
                # Set in service.py
                if self.monitor.waitForAbort(5):
                    # Abort was requested while waiting. We should exit
                    break

            if (window('emby_dbCheck') != "true"
                    and settings('SyncInstallRunDone') == "true"):
                # Verify the validity of the database

                embyconn = utils.kodiSQL('emby')
                embycursor = embyconn.cursor()
                emby_db = embydb.Embydb_Functions(embycursor)
                currentVersion = emby_db.get_version()
                ###$ Begin migration $###
                if not currentVersion:
                    currentVersion = emby_db.get_version(
                        settings('dbCreatedWithVersion')
                        or self.clientInfo.get_version())
                    embyconn.commit()
                    log.info("Migration of database version completed")
                ###$ End migration $###
                embycursor.close()
                window('emby_version', value=currentVersion)

                minVersion = window('emby_minDBVersion')
                uptoDate = self.compareDBVersion(currentVersion, minVersion)

                if not uptoDate:
                    log.warn(
                        "Database version out of date: %s minimum version required: %s"
                        % (currentVersion, minVersion))

                    resp = dialog.yesno(lang(29999), lang(33022))
                    if not resp:
                        log.warn(
                            "Database version is out of date! USER IGNORED!")
                        dialog.ok(lang(29999), lang(33023))
                    else:
                        utils.reset()

                    break

                window('emby_dbCheck', value="true")

            if not startupComplete:
                # Verify the video database can be found
                videoDb = utils.getKodiVideoDBPath()
                if not xbmcvfs.exists(videoDb):
                    # Database does not exist
                    log.error("The current Kodi version is incompatible "
                              "with the Emby for Kodi add-on. Please visit "
                              "https://github.com/MediaBrowser/Emby.Kodi/wiki "
                              "to know which Kodi versions are supported.")

                    dialog.ok(heading=lang(29999), line1=lang(33024))
                    break

                # Run start up sync
                log.warn("Database version: %s", window('emby_version'))
                log.info("SyncDatabase (started)")
                startTime = datetime.now()
                librarySync = self.startSync()
                elapsedTime = datetime.now() - startTime
                log.info("SyncDatabase (finished in: %s) %s" %
                         (str(elapsedTime).split('.')[0], librarySync))

                # Add other servers at this point
                # TODO: re-add once plugin listing is created
                # self.user.load_connect_servers()

                # Only try the initial sync once per Kodi session, regardless of the outcome.
                # This prevents an infinite loop in case something goes wrong.
                startupComplete = True

            # Process updates
            if window('emby_dbScan') != "true" and window(
                    'emby_shouldStop') != "true":
                self.incrementalSync()

            if window('emby_onWake') == "true" and window(
                    'emby_online') == "true":
                # Kodi is waking up
                # Set in kodimonitor.py
                window('emby_onWake', clear=True)
                if window('emby_syncRunning') != "true":
                    log.info("SyncDatabase onWake (started)")
                    librarySync = self.startSync()
                    log.info("SyncDatabase onWake (finished) %s" % librarySync)

            if self.stop_thread:
                # Set in service.py
                log.debug("Service terminated thread.")
                break

            if self.monitor.waitForAbort(1):
                # Abort was requested while waiting. We should exit
                break

        log.warn("###===--- LibrarySync Stopped ---===###")
예제 #23
0
def main():
    ################ load ###################
    if os.path.exists('D:/Software/GamaWorkspace/Python/weight/actor.pkl'):
        actor =  Actor(state_size, action_size).to(device)
        actor.load_state_dict(torch.load('D:/Software/GamaWorkspace/Python/weight/actor.pkl'))
        print('Actor Model loaded')
    else:
        actor = Actor(state_size, action_size).to(device)
    if os.path.exists('D:/Software/GamaWorkspace/Python/weight/critic.pkl'):
        critic = Critic(state_size, action_size).to(device)
        critic.load_state_dict(torch.load('D:/Software/GamaWorkspace/Python/weight/critic.pkl'))
        print('Critic Model loaded')
    else:
        critic = Critic(state_size, action_size).to(device)
    print("Waiting for GAMA...")
    ################### initialization ########################
    reset()

    optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))#optim.Adam(actor.parameters())  
    optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))#optim.Adam(critic.parameters())

    episode = 0
    test = "GAMA"
    state,reward,done,time_pass,over = GAMA_connect(test)
    print("done:",done,"timepass:"******"acceleration: ",action.cpu().numpy())#,"action.cpu().numpy()",type(float(action.cpu().numpy()))
            to_GAMA = [[1,float(action.cpu().numpy()*10)]] # row
            np.savetxt(from_python_1,to_GAMA,delimiter=',')
            np.savetxt(from_python_2,to_GAMA,delimiter=',')
            # reward from the previous step
            rewards.append(torch.tensor([reward], dtype=torch.float, device=device)) #contains the last
            masks.append(torch.tensor([1-done], dtype=torch.float, device=device))   #over-0; otherwise-1 contains the last
            log_prob = log_prob.unsqueeze(0) #log_prob = dist.log_prob(action).unsqueeze(0)       # entropy += dist.entropy().mean()
            log_probs.append(log_prob)
            values.append(value)
            entropy += entropy
        # episode finished
        elif done == 1:
            print("restart acceleration: 0")
            to_GAMA = [[1,0]]
            np.savetxt(from_python_1,to_GAMA,delimiter=',')
            np.savetxt(from_python_2,to_GAMA,delimiter=',')
            # send to GAMA first, then compute the update
            rewards.append(torch.tensor([reward], dtype=torch.float, device=device)) #contains the last
            masks.append(torch.tensor([1-done], dtype=torch.float, device=device))   #over-0; otherwise-1 contains the last
            
            total_reward = sum(rewards)
            total_rewards.append(total_reward)

            last_state = torch.FloatTensor(state).to(device)
            last_value = critic(last_state)
            returns = compute_returns(last_value, rewards, masks) 
            values_next = returns[1:]#values[1:]
            values_next.append(torch.tensor([0], dtype=torch.float, device=device))
            
            log_probs = torch.cat(log_probs,1).squeeze()  #Concatenates the given sequence of seq tensors in the given dimension.
            returns = torch.cat(returns).detach()
            values = torch.cat(values)
            values_next = torch.cat(values_next)
            rewards = torch.cat(rewards)

            # TD:r(s) + v(s+1) - v(s)      #rewards.detach() + values_next - values  r(s) MC: returns.detach() - values???
            advantage = returns.detach() - values
            actor_loss = -(log_probs * advantage.detach()).mean()
            loss = advantage.pow(2).sum()
            loss.detach()
            critic_loss = (returns.detach() - values).pow(2).mean()

            optimizerA.zero_grad()
            optimizerC.zero_grad()
            actor_loss.backward()
            critic_loss.backward()
            optimizerA.step()
            optimizerC.step()

            print("--------------------------Net_Trained-------------------------------")
            print('--------------------------Iteration:',episode,'over--------------------------------')
            episode += 1
            log_probs = []
            values = []
            rewards = []
            masks = []
            torch.save(actor.state_dict(), 'D:/Software/GamaWorkspace/Python/weight/actor.pkl')
            torch.save(critic.state_dict(), 'D:/Software/GamaWorkspace/Python/weight/critic.pkl')
            #print("entropy: ",entropy,"total_rewards:",total_rewards)
            entropys.append(entropy)
            total_loss.append(loss)
            if(episode!=0):
                cross_loss_curve(total_loss,total_rewards)
            loss = 0

            if episode > 90  :
                new_lr = lr * (0.92 ** ((episode-80) // 10))
                optimizerA = optim.Adam(actor.parameters(), new_lr, betas=(0.95, 0.999))
                optimizerC = optim.Adam(critic.parameters(), new_lr, betas=(0.95, 0.999))

        # first step (initial state)
        else:
            print('Iteration:',episode)
            state = torch.FloatTensor(state).to(device)
            value =  critic(state)  #dist,  # now is a tensoraction = dist.sample() 
            action,log_prob,entropy = actor(state)
            print("acceleration: ",float(action.cpu().numpy()*10))
            to_GAMA = [[1,action.cpu().numpy()]]
            np.savetxt(from_python_1,to_GAMA,delimiter=',')
            np.savetxt(from_python_2,to_GAMA,delimiter=',')
            log_prob = log_prob.unsqueeze(0) #log_prob = dist.log_prob(action).unsqueeze(0) #entropy += dist.entropy().mean()
            log_probs.append(log_prob)
            values.append(value)
            entropy += entropy

        state,reward,done,time_pass,over = GAMA_connect(test)
    return None #[action,log_prob_return,value]
예제 #24
0
def nn_results(hdf5, experiment, code_size_1, code_size_2, code_size_3):

    exp_storage = hdf5["experiments"]['cc200_whole']

    experiment = "cc200_whole"

    print exp_storage

    n_classes = 2

    results = []

    list = ['']

    list2 = []

    for fold in exp_storage:

        experiment_cv = format_config("{experiment}_{fold}", {
            "experiment": experiment,
            "fold": fold,
        })

        print "experiment_cv"

        print fold

        X_train, y_train, \
        X_valid, y_valid, \
        X_test, y_test,test_pid = load_fold(hdf5["patients"], exp_storage, fold)

        list.append(test_pid)

        print "X_train"

        print X_train.shape

        y_test = np.array([to_softmax(n_classes, y) for y in y_test])

        ae1_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-1.ckpt",
            {
                "experiment": experiment_cv,
            })
        ae2_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-2.ckpt",
            {
                "experiment": experiment_cv,
            })

        ae3_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-3.ckpt",
            {
                "experiment": experiment_cv,
            })

        nn_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_mlp.ckpt", {
                "experiment": experiment_cv,
            })

        try:

            model = nn(X_test.shape[1], n_classes, [
                {
                    "size": 2500,
                    "actv": tf.nn.tanh
                },
                {
                    "size": 1250,
                    "actv": tf.nn.tanh
                },
                {
                    "size": 625,
                    "actv": tf.nn.tanh
                },
            ])

            init = tf.global_variables_initializer()
            with tf.Session() as sess:

                sess.run(init)

                saver = tf.train.Saver(model["params"])

                print "savernn_model_path"

                print nn_model_path

                saver.restore(sess, nn_model_path)

                output = sess.run(model["output"],
                                  feed_dict={
                                      model["input"]: X_test,
                                      model["dropouts"][0]: 1.0,
                                      model["dropouts"][1]: 1.0,
                                      model["dropouts"][2]: 1.0,
                                  })

                np.set_printoptions(suppress=True)

                y_score = output[:, 1]

                print "y_score"

                print y_score

                y_pred = np.argmax(output, axis=1)

                print "y_pred"
                print y_pred

                print "output"

                hang = output.shape[0]

                lie = output.shape[1]

                print hang

                print lie

                for tt in range(hang):
                    for xx in range(lie):

                        output[tt][xx] = round(output[tt][xx], 4)

                        output[tt][xx] = str(output[tt][xx])

                aa = output[:, 0]

                print type(aa)

                list2.append(output)

                list.append(y_pred)

                print "-------------------------------------"

                y_true = np.argmax(y_test, axis=1)

                list.append(y_true)

                print "y_true"
                print y_true

                auc_score = roc_auc_score(y_true, y_score)
                print auc_score

                [[TN, FP], [FN,
                            TP]] = confusion_matrix(y_true,
                                                    y_pred,
                                                    labels=[0,
                                                            1]).astype(float)
                accuracy = (TP + TN) / (TP + TN + FP + FN)

                print(TP)
                print(TN)
                print(FP)
                print(FN)
                specificity = TN / (FP + TN)
                precision = TP / (TP + FP)
                sensivity = recall = TP / (TP + FN)
                fscore = 2 * TP / (2 * TP + FP + FN)

                results.append([
                    accuracy, precision, recall, fscore, sensivity,
                    specificity, auc_score
                ])
        finally:
            reset()

    workbook = xlwt.Workbook(encoding='utf-8')

    booksheet = workbook.add_sheet('Sheet 1', cell_overwrite_ok=True)

    wb = xlwt.Workbook(encoding='utf-8')

    worksheet = wb.add_sheet('Sheet 1', cell_overwrite_ok=True)

    DATA = list

    print list2

    for i, row in enumerate(DATA):
        for j, col in enumerate(row):
            booksheet.write(j, i, col)
    # workbook.save('./data/dos_tichu_2500_1250_625_xlst.xls')

    return [experiment] + np.mean(results, axis=0).tolist()
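
The y_test labels above are passed through to_softmax before being compared with the network output. A likely sketch of that helper, assuming it simply one-hot encodes an integer class label (the project's own definition is not included here):

import numpy as np

def to_softmax(n_classes, label):
    # One-hot encode an integer class label.
    encoded = np.zeros(n_classes, dtype=np.float32)
    encoded[int(label)] = 1.0
    return encoded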
예제 #25
0
import bpy
import numpy as np

from mathutils import Vector

import animation
import objects
import utils
import render

# reset scene
utils.reset()

# set a dark grey horizon (background) color
bpy.data.worlds["World"].horizon_color = [0.04, 0.04, 0.04]

# create some common objects
scene = bpy.context.scene

camera_rotation_n_frames = 100

# ─── TITLE ─────────────────────────────────────────────────────────────────────

print("STAGE: title")

stage_title = bpy.data.objects.new("stage_title", None)
stage_title.rotation_euler = Vector((0, 0, np.pi / 2))
stage_title.location = Vector((-20, 0, 0))
scene.objects.link(stage_title)

stage_title["start"] = 0

# add camera and initially look at title stage
camera = utils.camera((0, 0, 0), lens=20)
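
utils.camera() is called above but not defined in this excerpt. A rough sketch of such a helper, assuming the Blender 2.7x API the rest of the script already uses (scene.objects.link, world.horizon_color):

import bpy
from mathutils import Vector

def camera(location, lens=35):
    # Create a camera data-block and object, place it, and make it the active camera.
    cam_data = bpy.data.cameras.new('camera')
    cam_data.lens = lens
    cam_obj = bpy.data.objects.new('camera', cam_data)
    cam_obj.location = Vector(location)
    bpy.context.scene.objects.link(cam_obj)
    bpy.context.scene.camera = cam_obj
    return cam_obj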
예제 #26
0
    def __init__(self):
        log.debug('Full sys.argv received: %s' % argv)
        # Parse parameters
        params = dict(parse_qsl(argv[2][1:]))
        mode = params.get('mode', '')
        itemid = params.get('id', '')

        if mode == 'play':
            self.play()

        elif mode == 'plex_node':
            self.play()

        elif mode == 'ondeck':
            entrypoint.getOnDeck(itemid,
                                 params.get('type'),
                                 params.get('tagname'),
                                 int(params.get('limit')))

        elif mode == 'recentepisodes':
            entrypoint.getRecentEpisodes(itemid,
                                         params.get('type'),
                                         params.get('tagname'),
                                         int(params.get('limit')))

        elif mode == 'nextup':
            entrypoint.getNextUpEpisodes(params['tagname'],
                                         int(params['limit']))

        elif mode == 'inprogressepisodes':
            entrypoint.getInProgressEpisodes(params['tagname'],
                                             int(params['limit']))

        elif mode == 'browseplex':
            entrypoint.browse_plex(key=params.get('key'),
                                   plex_section_id=params.get('id'))

        elif mode == 'getsubfolders':
            entrypoint.GetSubFolders(itemid)

        elif mode == 'watchlater':
            entrypoint.watchlater()

        elif mode == 'channels':
            entrypoint.channels()

        elif mode == 'settings':
            executebuiltin('Addon.OpenSettings(%s)' % v.ADDON_ID)

        elif mode == 'enterPMS':
            entrypoint.enterPMS()

        elif mode == 'reset':
            reset()

        elif mode == 'togglePlexTV':
            entrypoint.togglePlexTV()

        elif mode == 'resetauth':
            entrypoint.resetAuth()

        elif mode == 'passwords':
            passwordsXML()

        elif mode == 'switchuser':
            entrypoint.switchPlexUser()

        elif mode in ('manualsync', 'repair'):
            if window('plex_online') != 'true':
                # Server is not online, do not run the sync
                dialog('ok', lang(29999), lang(39205))
                log.error('Not connected to a PMS.')
            else:
                if mode == 'repair':
                    window('plex_runLibScan', value='repair')
                    log.info('Requesting repair lib sync')
                elif mode == 'manualsync':
                    log.info('Requesting full library scan')
                    window('plex_runLibScan', value='full')

        elif mode == 'texturecache':
            window('plex_runLibScan', value='del_textures')

        elif mode == 'chooseServer':
            entrypoint.chooseServer()

        elif mode == 'refreshplaylist':
            log.info('Requesting playlist/nodes refresh')
            window('plex_runLibScan', value='views')

        elif mode == 'deviceid':
            self.deviceid()

        elif mode == 'fanart':
            log.info('User requested fanarttv refresh')
            window('plex_runLibScan', value='fanart')

        elif '/extrafanart' in argv[0]:
            plexpath = argv[2][1:]
            plexid = itemid
            entrypoint.getExtraFanArt(plexid, plexpath)
            entrypoint.getVideoFiles(plexid, plexpath)

        # Called by e.g. 3rd party plugin video extras
        elif ('/Extras' in argv[0] or '/VideoFiles' in argv[0] or
                '/Extras' in argv[2]):
            plexId = itemid or None
            entrypoint.getVideoFiles(plexId, params)

        else:
            entrypoint.doMainListing(content_type=params.get('content_type'))
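
For readers unfamiliar with Kodi's plugin protocol, this is roughly what sys.argv looks like when the router above is invoked (the add-on path and parameters here are illustrative, not taken from this code):

from urllib.parse import parse_qsl  # urlparse.parse_qsl on Python 2

argv = [
    'plugin://plugin.video.example/',                    # hypothetical add-on path
    '1',                                                 # handle assigned by Kodi
    '?mode=ondeck&type=movies&tagname=Movies&limit=25',  # query string
]
params = dict(parse_qsl(argv[2][1:]))
# -> {'mode': 'ondeck', 'type': 'movies', 'tagname': 'Movies', 'limit': '25'}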
예제 #27
0
def nn_results(hdf5, experiment, code_size_1, code_size_2):

    exp_storage = hdf5["experiments"][experiment]

    n_classes = 2

    results = []

    for fold in exp_storage:

        experiment_cv = format_config("{experiment}_{fold}", {
            "experiment": experiment,
            "fold": fold,
        })

        X_train, y_train, \
        X_valid, y_valid, \
        X_test, y_test = load_fold(hdf5["patients"], exp_storage, fold)

        y_test = np.array([to_softmax(n_classes, y) for y in y_test])

        ae1_model_path = format_config(
            "./data/models/{experiment}_autoencoder-1.ckpt", {
                "experiment": experiment_cv,
            })
        ae2_model_path = format_config(
            "./data/models/{experiment}_autoencoder-2.ckpt", {
                "experiment": experiment_cv,
            })
        nn_model_path = format_config("./data/models/{experiment}_mlp.ckpt", {
            "experiment": experiment_cv,
        })

        try:

            model = nn(X_test.shape[1], n_classes, [
                {
                    "size": 1000,
                    "actv": tf.nn.tanh
                },
                {
                    "size": 600,
                    "actv": tf.nn.tanh
                },
            ])

            init = tf.global_variables_initializer()
            with tf.Session() as sess:

                sess.run(init)

                saver = tf.train.Saver(model["params"])
                saver.restore(sess, nn_model_path)

                output = sess.run(model["output"],
                                  feed_dict={
                                      model["input"]: X_test,
                                      model["dropouts"][0]: 1.0,
                                      model["dropouts"][1]: 1.0,
                                  })

                print(output)

                y_pred = np.argmax(output, axis=1)
                y_true = np.argmax(y_test, axis=1)

                [[TN, FP], [FN,
                            TP]] = confusion_matrix(y_true,
                                                    y_pred,
                                                    labels=[0,
                                                            1]).astype(float)
                accuracy = (TP + TN) / (TP + TN + FP + FN)
                specificity = TN / (FP + TN)
                precision = TP / (TP + FP)
                sensivity = recall = TP / (TP + FN)
                fscore = 2 * TP / (2 * TP + FP + FN)

                results.append([
                    accuracy, precision, recall, fscore, sensivity, specificity
                ])
        finally:
            reset()

    return [experiment] + np.mean(results, axis=0).tolist()
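
The per-fold metrics above are derived by hand from the confusion matrix. As an optional sanity check (assuming scikit-learn is available, which this snippet does not itself import), the same numbers can be reproduced like so:

import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true = np.array([0, 1, 1, 0, 1])   # toy labels for illustration
y_pred = np.array([0, 1, 0, 0, 1])

accuracy = accuracy_score(y_true, y_pred)
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
fscore = f1_score(y_true, y_pred)
specificity = recall_score(y_true, y_pred, pos_label=0)  # recall of the negative class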
예제 #28
0
from packet import Packet
from random import randint
from matplotlib import pyplot as plt
from utils import reset, count_wins, report
from globals import networks, SIZE, NTRIALS, PTIMES, STRATEGIES

NTIME_STEPS = 200
NSERVERS = SIZE**2

win_freqs = {strategy: [] for strategy in STRATEGIES}

if __name__ == '__main__':
    # Run SLB model simulation
    for ptime in PTIMES:
        for i in xrange(NTRIALS):
            reset(networks)
            for j in xrange(NTIME_STEPS):
                loc = randint(0, NSERVERS - 1)
                pkt = Packet('id-%d' % (j), ptime)
                for sim_obj in networks:
                    network = sim_obj['obj']
                    network.step()
                    network.add(pkt, loc)
                    sim_obj['dat'][ptime][i] += network.latency()

    # Count and report win frequencies
    for ptime in PTIMES:
        win_counts = {strategy: 0 for strategy in STRATEGIES}
        count_wins(networks, ptime, win_counts, NTRIALS)
        report(win_counts, ptime, NTRIALS)
        for strategy, count in win_counts.iteritems():
예제 #29
0
def main():
    ################ load ###################
    actor_path = os.path.abspath(
        os.curdir) + '/PPO_Mixedinput_Navigation_Model/weight/AC_TD0_actor.pkl'
    critic_path = os.path.abspath(
        os.curdir
    ) + '/PPO_Mixedinput_Navigation_Model/weight/AC_TD0_critic.pkl'
    if os.path.exists(actor_path):
        actor = Actor(state_size, action_size).to(device)
        actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    else:
        actor = Actor(state_size, action_size).to(device)
    if os.path.exists(critic_path):
        critic = Critic(state_size, action_size).to(device)
        critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    else:
        critic = Critic(state_size, action_size).to(device)
    critic_next = Critic(state_size, action_size).to(device)
    critic_next.load_state_dict(critic.state_dict())
    print("Waiting for GAMA...")
    ################### initialization ########################
    reset()

    episode = 1257

    lr = 0.0001
    sample_lr = [
        0.0001, 0.00009, 0.00008, 0.00007, 0.00006, 0.00005, 0.00004, 0.00003,
        0.00002, 0.00001, 0.000009, 0.000008, 0.000007, 0.000006, 0.000005,
        0.000004, 0.000003, 0.000002, 0.000001
    ]
    if episode > 50:  #50 100
        try:
            lr = sample_lr[int(episode // 50)]
        except (IndexError):
            lr = 0.000001

    optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))
    optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))

    test = "GAMA"
    state, reward, done, time_pass, over = GAMA_connect(test)  #connect
    print("done:", done, "timepass:"******"----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1
            log_probs = []
            values = []
            rewards = []
            masks = []
            loss_sum = sum(loss).cpu().detach().numpy()
            total_loss.append(loss_sum)
            cross_loss_curve(loss_sum.squeeze(0), total_reward, save_curve_pic,
                             save_critic_loss,
                             save_reward)  #total_loss,total_rewards
            loss = []
            memory.clear_memory()
            if episode > 50:  #50 100
                try:
                    lr = sample_lr[int(episode // 50)]
                except (IndexError):
                    lr = 0.000001
                optimizerA = optim.Adam(actor.parameters(),
                                        lr,
                                        betas=(0.95, 0.999))
                optimizerC = optim.Adam(critic.parameters(),
                                        lr,
                                        betas=(0.95, 0.999))

            torch.save(actor.state_dict(), actor_path)
            torch.save(critic.state_dict(), critic_path)

        # first step (initial state)
        else:
            print('Iteration:', episode, "lr:", lr)
            state = np.reshape(state, (1, len(state)))  #xxx
            state_img = generate_img()
            tensor_cv = torch.from_numpy(np.transpose(
                state_img, (2, 0, 1))).double().to(device)
            state = torch.DoubleTensor(state).reshape(1, state_size).to(device)

            for _ in range(3):
                memory.states.append(state)
                memory.states_img.append(tensor_cv)
            state = torch.stack(memory.states).to(device).detach()  ###
            tensor_cv = torch.stack(memory.states_img).to(device).detach()
            value = critic(
                state,
                tensor_cv)  #dist,  # now is a tensoraction = dist.sample()
            action, log_prob, entropy = actor(state, tensor_cv)
            print("acceleration: ", action.cpu().numpy())
            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])
            log_prob = log_prob.unsqueeze(0)
            entropy += entropy

        state, reward, done, time_pass, over = GAMA_connect(test)
    return None
예제 #30
0
파일: nn.py 프로젝트: uuup111/acerta-abide
def run_autoencoder2(experiment,
                     X_train,
                     y_train,
                     X_valid,
                     y_valid,
                     X_test,
                     y_test,
                     model_path,
                     prev_model_path,
                     code_size=600,
                     prev_code_size=1000):
    """

    Run the second autoencoder.
    It takes the dimensionality from first autoencoder and compresses it into the new `code_size`
    Firstly, we need to convert original data to the new projection from autoencoder 1.

    """

    if os.path.isfile(model_path) or \
       os.path.isfile(model_path + ".meta"):
        return

    # tf.disable_v2_behavior()

    # Convert training, validation and test set to the new representation
    prev_model = ae(
        X_train.shape[1],
        prev_code_size,
        corruption=0.0,  # Disable corruption for conversion
        enc=tf.nn.tanh,
        dec=None)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        saver = tf.train.Saver(prev_model["params"],
                               write_version=tf.train.SaverDef.V2)
        if os.path.isfile(prev_model_path):
            saver.restore(sess, prev_model_path)
        X_train = sess.run(prev_model["encode"],
                           feed_dict={prev_model["input"]: X_train})
        X_valid = sess.run(prev_model["encode"],
                           feed_dict={prev_model["input"]: X_valid})
        X_test = sess.run(prev_model["encode"],
                          feed_dict={prev_model["input"]: X_test})
    del prev_model

    reset()

    # Hyperparameters
    learning_rate = 0.0001
    corruption = 0.9
    ae_enc = tf.nn.tanh
    ae_dec = None

    training_iters = 2000
    batch_size = 10
    n_classes = 2

    # Load model
    model = ae(prev_code_size,
               code_size,
               corruption=corruption,
               enc=ae_enc,
               dec=ae_dec)

    # Use GD for optimization of model cost
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        model["cost"])

    # Initialize Tensorflow session
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        # Define model saver
        saver = tf.train.Saver(model["params"],
                               write_version=tf.train.SaverDef.V2)

        # Initialize with an absurd cost for model selection
        prev_costs = np.array([9999999999] * 3)

        # Iterate Epochs
        for epoch in range(training_iters):

            # Break training set into batches
            batches = range(int(len(X_train) / batch_size))
            costs = np.zeros((len(batches), 3))

            for ib in batches:

                # Compute start and end of batch from training set data array
                from_i = ib * batch_size
                to_i = (ib + 1) * batch_size

                # Select current batch
                batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]

                # Run optimization and retrieve training cost
                _, cost_train = sess.run([optimizer, model["cost"]],
                                         feed_dict={model["input"]: batch_xs})

                # Compute validation cost
                cost_valid = sess.run(model["cost"],
                                      feed_dict={model["input"]: X_valid})

                # Compute test cost
                cost_test = sess.run(model["cost"],
                                     feed_dict={model["input"]: X_test})

                costs[ib] = [cost_train, cost_valid, cost_test]

            # Compute the average costs from all batches
            costs = costs.mean(axis=0)
            cost_train, cost_valid, cost_test = costs

            # Pretty print training info
            print(format_config(
                "Exp={experiment}, Model=ae2, Iter={epoch:5d}, Cost={cost_train:.6f} {cost_valid:.6f} {cost_test:.6f}",
                {
                    "experiment": experiment,
                    "epoch": epoch,
                    "cost_train": cost_train,
                    "cost_valid": cost_valid,
                    "cost_test": cost_test,
                }))

            # Save better model if optimization achieves a lower cost
            if cost_valid < prev_costs[1]:
                print("Saving better model")
                saver.save(sess, model_path)
                prev_costs = costs
            else:
                print()
예제 #31
0
파일: nn.py 프로젝트: uuup111/acerta-abide
def run_nn(hdf5, experiment, code_size_1, code_size_2):
    # tf.disable_v2_behavior()

    exp_storage = hdf5["experiments"][experiment]

    for fold in exp_storage:

        experiment_cv = format_config("{experiment}_{fold}", {
            "experiment": experiment,
            "fold": fold,
        })

        X_train, y_train, \
        X_valid, y_valid, \
        X_test, y_test = load_fold(hdf5["patients"], exp_storage, fold)

        ae1_model_path = format_config(
            "./data/models/{experiment}_autoencoder-1.ckpt", {
                "experiment": experiment_cv,
            })
        ae2_model_path = format_config(
            "./data/models/{experiment}_autoencoder-2.ckpt", {
                "experiment": experiment_cv,
            })
        nn_model_path = format_config("./data/models/{experiment}_mlp.ckpt", {
            "experiment": experiment_cv,
        })

        reset()

        # Run first autoencoder
        run_autoencoder1(experiment_cv,
                         X_train,
                         y_train,
                         X_valid,
                         y_valid,
                         X_test,
                         y_test,
                         model_path=ae1_model_path,
                         code_size=code_size_1)

        reset()

        # Run second autoencoder
        run_autoencoder2(experiment_cv,
                         X_train,
                         y_train,
                         X_valid,
                         y_valid,
                         X_test,
                         y_test,
                         model_path=ae2_model_path,
                         prev_model_path=ae1_model_path,
                         prev_code_size=code_size_1,
                         code_size=code_size_2)

        reset()

        # Run multilayer NN with pre-trained autoencoders
        run_finetuning(experiment_cv,
                       X_train,
                       y_train,
                       X_valid,
                       y_valid,
                       X_test,
                       y_test,
                       model_path=nn_model_path,
                       prev_model_1_path=ae1_model_path,
                       prev_model_2_path=ae2_model_path,
                       code_size_1=code_size_1,
                       code_size_2=code_size_2)
예제 #32
0
def main():
    ################ load ###################
    actor_path = os.path.abspath(
        os.curdir) + '/Generate_Traffic_Flow_MAS_RL/weight/AC_TD2_actor.pkl'
    if os.path.exists(actor_path):
        actor = Actor(state_size, action_size).to(device)
        actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    else:
        actor = Actor(state_size, action_size).to(device)
    print("Waiting for GAMA...")
    ################### initialization ########################
    reset()

    Using_LSTM = False
    test = "GAMA"
    N_agent = 20
    list_hidden = []

    count = 0
    ##################  start  #########################
    state = GAMA_connect(test)
    print("Connected")
    while True:
        if Using_LSTM == False:
            state = [
                torch.DoubleTensor(elem).reshape(1, state_size).to(device)
                for elem in state
            ]
            state = torch.stack(state).to(device).detach()
            tensor_cv = generate_img()
            tensor_cv = [
                torch.from_numpy(np.transpose(elem,
                                              (2, 0, 1))).double().to(device) /
                255 for elem in tensor_cv
            ]
            tensor_cv = torch.stack(tensor_cv).to(device).detach()

            action, h_state_cv_a, h_state_n_a = actor(state, tensor_cv)

            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])

        else:
            if len(list_hidden) < N_agent:
                state = [
                    torch.DoubleTensor(elem).reshape(1, state_size).to(device)
                    for elem in state
                ]
                state = torch.stack(state).to(device).detach()
                tensor_cv = generate_img()
                tensor_cv = [
                    torch.from_numpy(np.transpose(
                        elem, (2, 0, 1))).double().to(device) / 255
                    for elem in tensor_cv
                ]
                tensor_cv = torch.stack(tensor_cv).to(device).detach()

                action, h_state_cv_a, h_state_n_a = actor(state, tensor_cv)

                send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])
                list_hidden.append(Memory(h_state_cv_a, h_state_n_a))
                count += 1

            else:
                state = [
                    torch.DoubleTensor(elem).reshape(1, state_size).to(device)
                    for elem in state
                ]
                state = torch.stack(state).to(device).detach()
                tensor_cv = generate_img()
                tensor_cv = [
                    torch.from_numpy(np.transpose(
                        elem, (2, 0, 1))).double().to(device) / 255
                    for elem in tensor_cv
                ]
                tensor_cv = torch.stack(tensor_cv).to(device).detach()

                action, h_state_cv_a, h_state_n_a = actor(
                    state, tensor_cv,
                    list_hidden[count % N_agent].h_state_cv_a,
                    list_hidden[count % N_agent].h_state_n_a)

                send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])
                list_hidden[count % N_agent].set_hidden(
                    h_state_cv_a, h_state_n_a)
                count += 1

        state = GAMA_connect(test)

    return None
예제 #33
0
####################################

write_MAC_cmd = ('ets', 'PPWR=2,' + mac)
read_MAC_cmd = ('ets', 'PPRE=2')
if check_ser_port is True:
    # Writes the input MAC to DUT
    utils.debug('Writing input MAC to DUT')
    out_write = serial_com.write_cmd(write_MAC_cmd)
    serial_com.write_cmd('q')

    time.sleep(0.05)

    # Resets DUT
    utils.debug('Resetting DUT ...')
    utils.reset(os.environ['nrfjprog_PATH'])
    time.sleep(5)

    # Reads MAC of DUT
    utils.debug('Reading MAC from DUT')
    out_read = serial_com.write_cmd(read_MAC_cmd)
    serial_com.write_cmd('q')

    if mac in out_read:
        utils.debug('Successfully wrote input MAC to DUT')
    else:
        utils.debug('Writing MAC to DUT failed')
else:
    utils.debug('Serial port is not open')

    ser.open()
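
utils.reset(os.environ['nrfjprog_PATH']) above resets the DUT between writing and reading the MAC. A plausible sketch of that helper, assuming it shells out to Nordic's nrfjprog command-line tool (the project's own implementation is not shown):

import subprocess

def reset(nrfjprog_path):
    # Issue a reset on the connected device via the nrfjprog CLI.
    subprocess.check_call([nrfjprog_path, '--reset'])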
예제 #34
0
def main():
    ################ load ###################
    if os.path.exists(
            'D:/Software/PythonWork/GAMA_python/A2C-TD-single-car-intersection/weight/actor.pkl'
    ):
        actor = Actor(state_size, action_size).to(device)
        actor.load_state_dict(
            torch.load(
                'D:/Software/PythonWork/GAMA_python/A2C-TD-single-car-intersection/weight/actor.pkl'
            ))
        print('Actor Model loaded')
    else:
        actor = Actor(state_size, action_size).to(device)
    if os.path.exists(
            'D:/Software/PythonWork/GAMA_python/A2C-TD-single-car-intersection/weight/critic.pkl'
    ):
        critic = Critic(state_size, action_size).to(device)
        critic.load_state_dict(
            torch.load(
                'D:/Software/PythonWork/GAMA_python/A2C-TD-single-car-intersection/weight/critic.pkl'
            ))
        print('Critic Model loaded')
    else:
        critic = Critic(state_size, action_size).to(device)
    print("Waiting for GAMA...")
    ################### initialization ########################
    reset()
    lr = 0.00007

    optimizerA = optim.Adam(actor.parameters(), lr, betas=(0.95, 0.999))
    optimizerC = optim.Adam(critic.parameters(), lr, betas=(0.95, 0.999))

    episode = 0
    test = "GAMA"
    state, reward, done, time_pass, over = GAMA_connect(test)  #connect
    print("done:", done, "timepass:"******"restart acceleration: 0")
            send_to_GAMA([[1, 0]])
            # send first, then compute
            rewards.append(reward)  #contains the last
            reward = torch.tensor([reward], dtype=torch.float, device=device)
            rewards.append(reward)  #contains the last
            total_reward = sum(rewards)
            total_rewards.append(total_reward)

            #state = torch.FloatTensor(state).reshape(1,4).to(device)
            #last_value= critic(state)

            with torch.autograd.set_detect_anomaly(True):
                advantage = reward.detach() - value  # + last_value ; V(s+1) of the final step = 0
                actor_loss = -(log_prob * advantage.detach())
                print("actor_loss, ", actor_loss, " size", actor_loss.dim())
                critic_loss = (reward.detach() - value).pow(2)  #+ last_value
                lstm_loss = critic_loss

                optimizerA.zero_grad()
                optimizerC.zero_grad()

                critic_loss.backward(retain_graph=True)
                actor_loss.backward(retain_graph=True)
                loss.append(critic_loss)

                optimizerA.step()
                optimizerC.step()

            print(
                "----------------------------------Net_Trained---------------------------------------"
            )
            print('--------------------------Iteration:', episode,
                  'over--------------------------------')
            episode += 1
            log_probs = []
            values = []
            rewards = []
            masks = []
            torch.save(
                actor.state_dict(),
                'D:/Software/PythonWork/GAMA_python/A2C-TD-single-car-intersection/weight/actor.pkl'
            )
            torch.save(
                critic.state_dict(),
                'D:/Software/PythonWork/GAMA_python/A2C-TD-single-car-intersection/weight/critic.pkl'
            )
            loss_sum = sum(loss)
            total_loss.append(loss_sum)
            cross_loss_curve(total_loss, total_rewards)
            loss = []
            if episode > 50:  #50
                lr = 0.0002
                if episode > 115:
                    lr = 0.0001
                new_lr = lr * (0.94**((episode - 40) // 10))  #40
                optimizerA = optim.Adam(actor.parameters(),
                                        new_lr,
                                        betas=(0.95, 0.999))
                optimizerC = optim.Adam(critic.parameters(),
                                        new_lr,
                                        betas=(0.95, 0.999))

        # first step (initial state)
        else:
            print('Iteration:', episode)
            state = np.reshape(state, (1, len(state)))  #xxx
            state = torch.FloatTensor(state).reshape(1, 4).to(device)
            value = critic(
                state)  #dist,  # now is a tensoraction = dist.sample()
            action, log_prob, entropy = actor(state)
            print("acceleration: ", action.cpu().numpy())
            send_to_GAMA([[1, float(action.cpu().numpy() * 10)]])
            log_prob = log_prob.unsqueeze(0)
            entropy += entropy

        state, reward, done, time_pass, over = GAMA_connect(test)
    return None
예제 #35
0
파일: run.py 프로젝트: a4800061/lottery
def reset():
    return utils.reset()