Code example #1
File: repository.py  Project: haimroizman/chat
class Repository:
    def __init__(self, section):
        self.chat_db_config = self.read_db_config(section)  # Read the database connection settings
        self.logger = LogManager("chatApp")

    # For general execution, just pass the query
    def execute_query(self, query, params):
        conn = MySQLdb.connect(self.chat_db_config['host'], self.chat_db_config['user'],
                               self.chat_db_config['password'], self.chat_db_config['database'])
        cursor = conn.cursor()
        try:
            # Execute the SQL command
            cursor.execute(query, params)
            # Commit your changes in the database
            conn.commit()
        except MySQLdb.Error as error:
            conn.rollback()
            self.logger.exception_log(error)
        conn.close()

    # A function that uses yield returns a generator object
    def execute_select_generator(self, query):

        conn = MySQLdb.connect(self.chat_db_config['host'], self.chat_db_config['user'],
                               self.chat_db_config['password'], self.chat_db_config['database'])
        cursor = conn.cursor()
        try:
            cursor.execute(query)
            results = cursor.fetchall()
            for row in results:
                yield row
        except Exception as error:
            self.logger.error_log("Error: unable to fetch data: {}".format(error))
        conn.close()
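The select helper above fetches the full result set and then yields rows one at a time, so callers consume it like any other generator. A minimal consumption sketch, assuming a hypothetical config section name and table:

repo = Repository('chat_db')  # 'chat_db' is a hypothetical config section name
for row in repo.execute_select_generator("SELECT id, body FROM messages"):  # hypothetical table
    print(row)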
Code example #2
File: DM.py  Project: JinSakuma/ZoomWoZSystem
 def __init__(self, config):
     self.config = config
     self.api = DB_API(self.config)
     self.logger = LogManager(self.config)
     self.cash_output = None
     self.cash_command = None
     self.cash_slot = None
Code example #3
def test(fm: FileManager):
    with open(fm['config'], 'rb') as f:
        cfg = pickle.load(f)
    test_data = LoadDatasetManager(fm['test'],
                                   dict_classes=cfg['classes']).get_dataset()
    bm_test = BatchManager(test_data, cfg, StatusDatasets.Test)
    res = Tester(cfg, bm_test, fm['model']).run()
    LogManager.write_log(cfg, res, './logs/log.txt')
Code example #4
class ChatClearHistoryPlugin(IChatTask):
    def __init__(self):
        self.logger = LogManager("chatApp")
        self.chat_message_repository = ChatMessageRepository()

    def execute_task(self, under_num_days):
        self.logger.debug_log('The clear history task should start soon')
        self.chat_message_repository.delete_previous_message_execute(under_num_days)
Code example #5
    def __init__(self, start_time=0, end_time=int(time.time())):
        self.log_manager = LogManager(start_time, end_time)
        self.ip_cache = ip_location_cache.ip_location_cache()

        # for now, get from logs. maybe use aggregator to make these calls later?
        self.servers = self.log_manager.get_unique_destinations()

        self.uuid_metadata = {
        }  # dictionary mapping uuid -> metadata (includes state-ful data)
Code example #6
def live_event_change(log: LogManager, timestamp: datetime,
                      ch_onair: ProgrammeMetadata, ch_next: ProgrammeMetadata,
                      viewing_start: datetime):
    stop = make_live_stop_event(timestamp, viewing_start, ch_onair,
                                (timestamp - viewing_start).seconds)
    log.push_event(stop)
    play = make_live_play_event(timestamp, ch_next)
    log.push_event(play)
    return ch_next
Code example #7
File: target.py  Project: yue/build-gn
 def __init__(self, out_dir, target_cpu, logs_dir):
   self._out_dir = out_dir
   self._target_cpu = target_cpu
   self._command_runner = None
   self._symbolizer_proc = None
   self._log_listener_proc = None
   self._dry_run = False
   self._started = False
   self._log_manager = LogManager(logs_dir)
   self._ffx_runner = ffx_session.FfxRunner(self._log_manager)
Code example #8
 def __init__(self, graph, workspace, optim_info, mock=None):
     self.node_versions = dict()
     self.graph = graph
     self.log_manager = LogManager()
     self.workspace = workspace
     self.mock = mock
     self.local = bool(mock)
     self.optim_info = optim_info
     self.searcher = None
     for node in graph:
         self.node_versions[node.node_name] = []
Code example #9
File: target.py  Project: zealoussnow/chromium
 def __init__(self, out_dir, target_cpu, logs_dir):
     self._out_dir = out_dir
     self._target_cpu = target_cpu
     self._command_runner = None
     self._symbolizer_proc = None
     self._log_listener_proc = None
     self._dry_run = False
     self._started = False
     self._ffx_path = os.path.join(common.SDK_ROOT, 'tools',
                                   common.GetHostArchFromPlatform(), 'ffx')
     self._log_manager = LogManager(logs_dir)
Code example #10
    def __init__(self, path):
        # path that script will run
        self.path = path

        # flag to enable debug messages
        # if this flag is "True", no file is actually renamed (dry run)
        self.debug_mode = True

        # flag to enable log messages
        self.log_mode = True

        # logger object instances
        self.logger_duplicate = LogManager.instance().getLoggerDuplicateFile

        # lists of supported file extensions:
        self.images_list = ["jpg", "png"]
        self.video_list = ["mp4", "avi"]

        # total folders renamed by script
        self.total_folders = 0
        # total files renamed by script
        self.total_files_renamed = 0
        # total duplicated files saved in the log folder
        self.total_duplicated_files = 0
        # total files analyzed, based on "self.images_list" and "self.video_list"
        self.total_files = 0
        # files that have already been renamed and need no further action
        self.total_files_ok = 0
Code example #11
    def compare_dates(self, trashed_file_dict):
        """
        Function to compare file deletionTime with currentTime
        """
        date_format = '%Y-%m-%d %H:%M:%S'
        deletion_date = trashed_file_dict["deletionDate"]
        filename = trashed_file_dict["filename"]
        filename = os.path.basename(filename)

        # file deletion date
        deletion_date = deletion_date.replace("T", " ")
        deletion_datetime = datetime.strptime(deletion_date, date_format)

        # current time
        current_time_string = datetime.now().strftime(date_format)
        current_datetime = datetime.strptime(current_time_string, date_format)

        difference_in_days = (current_datetime - deletion_datetime).days

        real_filename = f"{self.home}/.local/share/Trash/files/{filename}"
        size = os.path.getsize(real_filename)

        if difference_in_days >= 30:
            # remove_files(filename)
            logger = LogManager.instance().getLogger
            logger.info(filename)
            return size

        return None
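compare_dates parses the trash metadata's deletionDate, measures its age in whole days against the current time, and returns the file size only once the file has been in the trash for at least 30 days. A small sketch of the date arithmetic it relies on, using a hypothetical timestamp:

from datetime import datetime

date_format = '%Y-%m-%d %H:%M:%S'
deletion = datetime.strptime('2024-01-01T10:30:00'.replace('T', ' '), date_format)
now = datetime.strptime(datetime.now().strftime(date_format), date_format)
print((now - deletion).days >= 30)  # True once the file is at least 30 days old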
Code example #12
    def notification(self, deleted_files):
        """
        Function to send notification in the end of script execution
        """
        notification_title = os.path.basename(__file__)

        files = deleted_files["Files"]
        size = deleted_files["Size"]
        notification_description = f"{files} Arquivos removidos: {size} Liberados"
        notification_icon = f"{os.path.dirname(os.path.abspath(__file__))}/python-logo-64px.png"

        command = f"/usr/bin/notify-send \"{notification_title}\" \"{notification_description}\" --icon={notification_icon}"

        if files is not None:
            os.system(command)

        LogManager.instance().log_clean()
Code example #13
    def __init__(self, config, training_manager, sess, batch_tensor_val):
        """ If we want to visualize the outputs """
        #terminal_draw_tool.start_curses()
        #terminal_draw_tool.curses_started = True

        self._batch_size = config.batch_size
        self._training_manager = training_manager
        self._sess = sess  # the session from tensorflow that is going to be used
        self._config = config
        self._training_start_time = time.time()

        self._logger = LogManager(config, training_manager, sess,
                                  self._config.use_curses)
        self.tensorboard_scalars()
        self.tensorboard_images()
        self._merged = tf.summary.merge_all()
        self._validater = ValidationManager(config, training_manager, sess,
                                            batch_tensor_val, self._merged)
        self.first_time = True
        self.store_steer = True
Code example #14
File: main.py  Project: nguyenvanhuybk99/ForexSpark
    def setup(cls, msg_num=1, rate=1):
        task_usdeur_logger_config = LoggerConfig("TASK_USDEUR", 2)
        task_usdeur_connector_config = ConnectorConfig("KAFKA_CONNECTOR", "USDEUR")
        task_gbpusd_logger_config = LoggerConfig("TASK_GBPUSD", 2)
        task_gbpusd_connector_config = ConnectorConfig("KAFKA_CONNECTOR", "GBPUSD")

        cls.__log_manager = LogManager([(task_usdeur_logger_config, task_usdeur_connector_config),
                                        (task_gbpusd_logger_config, task_gbpusd_connector_config)],
                                       msg_num, rate)

        return cls
Code example #15
    def flush(self):
        """Write the buffer to file."""
        if not self.written_catalog:
            LogManager(self.folder).register(self.filename, 'csv', self.name)
            self.written_catalog = True

        if not os.path.exists(self.filename):
            mode = 'w'
        else:
            mode = 'a'
        with open(self.filename, mode) as f:
            f.write(''.join(self.buffer))
        self.buffer = []
Code example #16
    def setup(cls, msg_num=1, rate=1):
        task_usage_logger_config = LoggerConfig("TASK_USAGE", 2)
        task_usage_connector_config = ConnectorConfig("KAFKA_CONNECTOR",
                                                      "TASK-USAGE")
        task_event_logger_config = LoggerConfig("TASK_EVENT", 2)
        task_event_connector_config = ConnectorConfig("KAFKA_CONNECTOR",
                                                      "TASK-EVENT")
        cls.__log_manager = LogManager(
            [(task_usage_logger_config, task_usage_connector_config),
             (task_event_logger_config, task_event_connector_config)], msg_num,
            rate)

        return cls
Code example #17
def main():
    # load in our configuration settings

    config = Config()
    if config.config_file is None:
        print('BuildAgent config file not found; skipping step.')
        return

    generator = DeltaGenerator(config, LogManager(config))
    result = generator.run()
    if result and len(generator.error_message):
        print(generator.error_message)
    return 0
Code example #18
    def build_summary(self, summary_files):
        """
        Function to build summary
        """
        total_size = 0
        DECIMAL = 2
        deleted_files = {"Files":None, "Size": None}

        # get total files
        total_files = len(summary_files)

        for index in range(total_files):
            # get total size in bytes
            total_size +=  summary_files[index]

        # KB
        total_size = round(total_size/1024, DECIMAL)
        unit = "KB"

        # MB
        if total_size > 1024:
            total_size = round(total_size/1024, DECIMAL)
            unit = "MB"

        # GB
        if total_size > 1024:
            total_size = round(total_size/1024, DECIMAL)
            unit = "GB"

        if total_files != 0:
            logger_summary = LogManager.instance().getLoggerSummary

            logger_summary.info(f"Files: {total_files}")
            logger_summary.info(f"Size: {total_size} {unit}")
            deleted_files["Files"] = total_files
            deleted_files["Size"] = f"{total_size} {unit}"

        return deleted_files
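build_summary expects the list of per-file sizes in bytes returned by compare_dates, sums them, and divides by 1024 until the total fits a readable unit. A quick worked example of that unit logic, with hypothetical sizes:

sizes = [2048, 1048576]              # hypothetical: 2 KB + 1 MB = 1050624 bytes
total = round(sum(sizes) / 1024, 2)  # 1026.0 (KB), still above 1024
total = round(total / 1024, 2)       # 1.0 (MB)
# build_summary(sizes) would therefore report {"Files": 2, "Size": "1.0 MB"}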
Code example #19
 def register(self):
     self._registered = True
     LogManager(os.path.dirname(self._fname)).register(
         self._fname, self._typ, self._name)
Code example #20
File: plotter.py  Project: ziyu-zhang/tfplus
 def register(self):
     if not self.registered:
         LogManager(self.folder).register(self.filename, 'image', self.name)
         self._registered = True
     pass
Code example #21
def trickmode_viewing(m: LogManager):
    timestamp = datetime.utcnow()
    m.clear_state(timestamp)

    context = device_context(timestamp)
    context.rcu_version = "** SCENARIO: TRICKMODE VIEWING **"
    m.push_event(context)

    player = PageViewEvent(timestamp, 'player')
    m.push_event(player)

    viewing_start = timestamp
    f8d_events = request_channel_events('F8D')

    event = find_event_on_air(timestamp, f8d_events)
    view_1 = make_live_play_event(timestamp, event)
    m.push_event(view_1)

    offset = (timestamp - event.start_time).seconds
    remaining_duration = event.duration.seconds - offset
    offset += int(remaining_duration / 2)
    timestamp += timedelta(seconds=int(remaining_duration / 2))
    stop = make_live_stop_event(timestamp, viewing_start, event, offset)
    m.push_event(stop)
    media = Media(content_type=ContentTypeType(
        ContentTypeType.TUNER_SUB_TRICKMODE),
                  view_status=ViewStatusType(ViewStatusType.CAPTIONS),
                  booking_source=None,
                  event_source=None,
                  create_timestamp=timestamp,
                  duration=timestamp - viewing_start)
    playback_pause = make_playback_event(timestamp, viewing_start, event,
                                         media, offset, 0)
    m.push_event(playback_pause)

    pause_duration = timedelta(minutes=15)
    timestamp += pause_duration
    media.duration += pause_duration
    playback_play = make_playback_event(timestamp, viewing_start, event, media,
                                        offset, 1000)
    m.push_event(playback_play)

    timestamp += pause_duration
    media.duration += pause_duration
    pvr_stop = make_pvr_stop_event(
        timestamp, viewing_start, event, media, event.duration.seconds,
        (event.duration - (viewing_start - event.start_time)).seconds)
    m.push_event(pvr_stop)
Code example #22
"""
import datetime
import json
import os
import sys

import requests
from lxml import html
from selenium import webdriver

from downloader import downloader
from log_manager import LogManager
from db.data_ops import DAO


logger = LogManager.getLogger('scraper')
# dao = DAO()

# Assuming `prev.json` is not in use by another process.
resources = os.path.join(
    os.path.abspath(os.path.join(__file__, os.path.pardir)),
    'resources'
)

prev_file = os.path.join(resources, 'prev.json')
with open(prev_file, 'r', encoding='utf-8') as f:
    prev = json.load(f)
# prev = dao.get_to_be_downloaded()
logger.info("Loaded the want-to-download anime list")

home = os.path.expanduser("~")
Code example #23
    logger.debug(script_stats)
    db.scriptstats.save(script_stats)
    return script_stats


def update_stats(script_stats):
    script_stats['running_count'] = 0
    script_stats['processed_requests'] = processed_requests
    script_stats['last_completed_at'] = datetime.now().strftime(
        '%A, %d. %B %Y %H:%M:%S')
    db.scriptstats.save(script_stats)


if __name__ == '__main__':
    endpoints = ['bloomington', 'baltimore', 'boston']
    lm = LogManager()
    logger = lm.logger
    connection = Connection(os.environ['MONGO_URI'])
    db = connection[os.environ['MONGO_DATABASE']]
    while True:
        for endpoint in endpoints:
            processed_requests = 0
            script_stats = increment_running_count_in_stats()
            logger.info('downloading requests from {0}...'.format(endpoint))
            city = three.city(endpoint)
            download_requests()
            logger.info('setting geospatial index on loc field')
            db.requests.ensure_index([("loc", GEO2D)])
            mark_requests_with_boundaries()
            update_stats(script_stats)
        logger.debug('requests downloaded; sleeping for 1 hour')
Code example #24
 def __init__(self):
     super(Haedong, self).__init__()
     self.log, self.res, self.err_log = LogManager.__call__().get_logger()
Code example #25
dir_name = os.path.join(cwd, 'ion_files')
try:
    # Create ion files target Directory
    os.mkdir(dir_name)
except FileExistsError:
    # remove old ion files
    files = os.path.join(dir_name, '*.10n')
    for file in glob.glob(files):
        os.unlink(file)
    # remove old json files
    files = os.path.join(dir_name, '*.json')
    for file in glob.glob(files):
        os.unlink(file)

manager = LogManager(sequence_counter=random.randint(1, 101) << 16,
                     max_events=100,
                     send_period=600,
                     path=dir_name)

manager.set_identity(
    hw_version='17.27.0.C',
    hw_id=bytes.fromhex('2b9c5d351a879a25b86851adc36acea6'),
    hw_client_id='62081957540',
    hw_card_id='000229047600',
    ams_id=bytes.fromhex(
        '026b45850456f79041d9fcf54b8fddf51ad41d8cd98f00b204e9800998ecf8427e'),
    ams_panel=1,
    app_version='1.16.1.9')

try:
    manager.start()
    power_states_activity(manager)
Code example #26
File: downloader.py  Project: abhiroyg/scrape-anime
import sys

from clint.textui import progress
from lxml import html
import notify2
import requests
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from urllib.parse import unquote

from log_manager import LogManager

logger = LogManager.getLogger('downloader')


def get_embedded_video_links(gogoanime_episode_url):
    """
    TODO:
    When we are downloading a series,
    Common url is:
        http://www.gogoanime.com/battery-episode-3
    But, some episodes will have urls like
        http://www.gogoanime.com/battery-episode-3-episode-3
    So, if this function cannot find videos
    at the common url, it goes to the previous page
    (if it exists) and clicks <Next episode>.
    Sometimes <Next episode> leads to a `movie/special/ova/ona`
    page, so it skips those and gets the correct url.
Code example #27
 def __init__(self):
     self.logger = LogManager("chatApp")
     self.chat_message_repository = ChatMessageRepository()
Code example #28
class OutputManager(object):
    def __init__(self, config, training_manager, sess, batch_tensor_val):
        """ If we want to visualize the outputs """
        #terminal_draw_tool.start_curses()
        #terminal_draw_tool.curses_started = True

        self._batch_size = config.batch_size
        self._training_manager = training_manager
        self._sess = sess  # the session from tensorflow that is going to be used
        self._config = config
        self._training_start_time = time.time()

        self._logger = LogManager(config, training_manager, sess,
                                  self._config.use_curses)
        self.tensorboard_scalars()
        self.tensorboard_images()
        self._merged = tf.summary.merge_all()
        self._validater = ValidationManager(config, training_manager, sess,
                                            batch_tensor_val, self._merged)
        self.first_time = True
        self.store_steer = True

    def tensorboard_scalars(self):

        #with tf.device('/gpu:0'):
        """ This is the program general loss """
        tf.summary.scalar('Loss',
                          tf.reduce_mean(self._training_manager.get_loss()))
        """ This is the loss energy vec """
        energy_tensor_vec = convert_mat_to_tensor(
            self._training_manager.get_variable_energy(),
            self._config.branch_config)
        print('energy')
        print(energy_tensor_vec)
        for i in range(len(energy_tensor_vec)):
            for j in range(len(
                    self._config.branch_config[i])):  # for other branches
                #print tf.squeeze(energy_tensor_vec[i +j])
                tf.summary.scalar(
                    'Energy_B_' + str(i) + '_' +
                    self._config.branch_config[i][j],
                    tf.squeeze(energy_tensor_vec[i][j]))

        print('error')
        variables_tensor_vec = convert_mat_to_tensor(
            self._training_manager.get_variable_error(),
            self._config.branch_config)

        for i in range(len(variables_tensor_vec)):
            for j in range(len(
                    self._config.branch_config[i])):  # for other branches
                #print tf.squeeze(variables_tensor_vec[i +j])
                tf.summary.scalar(
                    'Error_B_' + str(i) + '_' +
                    self._config.branch_config[i][j],
                    tf.squeeze(variables_tensor_vec[i][j]))
        """ This is the error vecs for the training """

        self._train_writer = tf.summary.FileWriter(
            self._config.train_path_write, self._sess.graph)

    def tensorboard_images(self):

        tf.summary.image('Image_input', self._training_manager._input_images)
        #tf.summary.image('Image_vbp',self._training_manager._vis_images)

    def write_tensorboard_summary(self, i):

        feedDict = self._training_manager.get_feed_dict()
        feedDict[self._training_manager._dout] = [1.0] * len(
            self._config.dropout)
        summary = self._sess.run(self._merged, feed_dict=feedDict)

        self._train_writer.add_summary(summary, i)

    def print_outputs(self, i, duration):

        # the dictionary of the data used for training
        if i % self._config.print_interval == 0:

            self._logger.update_log_state(i, duration)
        """ Writing summary """
        if i % self._config.summary_writing_period == 0 or self.first_time:
            #print 'Summari'
            self._logger.write_summary(i)
            #self._logger.write_images()

            self.write_tensorboard_summary(i)
            #terminal_draw_tool.clear()
            self.first_time = False

            #self._sess.run(self._training_manager._vis_images,feed_dict=self._training_manager.get_feed_dict())

            self._logger.print_screen_track(i, duration)

            #""" Validating """
        if i % self._config.validation_period == 0:

            self._validater.run(i)
        '''if self.store_steer == True:
Code example #29
 def init_logger(self):
     self.log, self.res, self.err_log = LogManager.__call__().get_logger()
Code example #30
File: flask_app.py  Project: quru/qis
configure_app(app)
extend_app(app)

with app.app_context():
    from cache_manager import CacheManager
    from data_manager import DataManager
    from image_manager import ImageManager
    from log_manager import LogManager
    from permissions_manager import PermissionsManager
    from stats_manager import StatsManager
    from task_manager import TaskManager

    # Create the logging client+server
    logger = LogManager(
        __about__.__tag__.lower() + '_' + str(os.getpid()),
        app.config['DEBUG'],
        app.config['LOGGING_SERVER'],
        app.config['LOGGING_SERVER_PORT']
    )
    app.log = logger
    LogManager.run_server(
        app.config['LOGGING_SERVER'],
        app.config['LOGGING_SERVER_PORT'],
        __about__.__tag__.lower() + '.log',
        app.config['DEBUG']
    )
    # Capture Flask's internal logging
    app.logger.addHandler(logger.logging_handler)

    # Announce startup
    logger.info(__about__.__title__ + ' v' + __about__.__version__ + ' engine startup')
    logger.info('Using settings ' + app.config['_SETTINGS_IN_USE'])
Code example #31
File: DM.py  Project: JinSakuma/ZoomWoZSystem
class DM:
    def __init__(self, config):
        self.config = config
        self.api = DB_API(self.config)
        self.logger = LogManager(self.config)
        self.cash_output = None
        self.cash_command = None
        self.cash_slot = None

    def get_recommendation(self, slot, target, type='passive'):
        '''
        slot: dict
            key: genre    genre
            key: person   person name
            key: sort_by  sort condition (eval_gt/eval_lt/eval_eq/day_gt/day_lt/day_eq)
            key: history  flag for "any others?" follow-up requests
        '''
        # Fetch movie information from the main table of the DB
        if slot['genre'] is not None and slot['person'] is None:
            df = self.api.search_movie_by_genre(slot['genre'])

        elif slot['person'] is not None and slot['genre'] is None:
            pid = self.api.person2id(slot['person'])
            df = self.api.search_movie_by_crew(pid)

        elif slot['genre'] is not None and slot['person'] is not None:
            pid = self.api.person2id(slot['person'])
            df_genre = self.api.search_movie_by_genre(slot['genre'])
            df_person = self.api.search_movie_by_crew(pid)
            df = pd.merge(df_genre, df_person)
        else:
            df = self.api.search_movie()

        # Sort the retrieved movies by the given condition
        if slot['sort_by'] in ('eval_gt', 'eval_lt', 'eval_eq'):
            # Exclude titles whose vote count is below a threshold
            df = df[df['vote'] > self.config['DM']['vote_min']]
            # Consult the history
            used_mid_list = self.logger.get_topic_history()
            prev_mid = used_mid_list[-1]
            prev_score = self.api.get_evaluation(prev_mid)

            if slot['sort_by'] == 'eval_gt':
                df = df.sort_values('evaluation', ascending=False)
                df = df[df['evaluation'] > prev_score]
            elif slot['sort_by'] == 'eval_lt':
                df = df.sort_values('evaluation', ascending=True)
                df = df[df['evaluation'] < prev_score]
            else:
                df = df.sort_values('evaluation', ascending=False)
                df = df[(df['evaluation'] > prev_score - 1)]
                df = df[(df['evaluation'] < prev_score + 1)]

        else:
            print("#############")
            df = df.dropna(subset=['pronunciation'])
            df = df.sort_values('popularity', ascending=False)

        if slot['history']:
            N = int(self.config['DM']['N'])
            df = df[:N]
            # Consult the history
            used_mid_list = self.logger.get_intoduced_mid_list()
            df = df[~df['movie_id'].isin(used_mid_list)]

        if len(df) > 0:
            # Pick one at random
            info = df.sample()
            pron = info['pronunciation'].iloc[0]
            if pron is not None:
                topic = pron
            else:
                topic = info['title'].iloc[0]
            mid = info['movie_id'].iloc[0]
        else:
            topic = ''
            mid = None
            pron = None

        gid = slot['genre']
        if gid is None:
            genre = None
        else:
            genre = self.api.id2genre(gid)

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=str(mid),
                         command='recommendation',
                         type=type)

        id = self.logger.write(data_dict, slot, [topic])

        output = {'topic': topic, 'mid': mid, 'pron': pron, 'genre': genre}
        return output, id

    def get_cast(self, slot, target, type='passive'):
        '''
        slot: dict
            key: title    movie title
            key: history  flag for "any others?" follow-up requests
        '''
        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        job_id = self.config['DM']['job_id_cast']
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
            cast_list = self.api.get_crew(mid, job_id)['name_ja'].tolist()
            if cast_list[0] is None:
                cast_list = self.api.get_crew(mid, job_id)['name_en'].tolist()

        # If the title has not come up in the conversation yet
        else:
            df = self.api.search_movie_by_title(title)
            if len(df) > 0:
                movie_info = df.iloc[0]
                mid = movie_info['movie_id']
                cast_list = self.api.get_crew(mid, job_id)['name_ja'].tolist()
                if cast_list[0] is None:
                    cast_list = self.api.get_crew(mid,
                                                  job_id)['name_en'].tolist()
            else:
                cast_list = []
                mid = -1

        # If history=True, consult the history
        N = int(self.config['DM']['cast_num'])
        if slot['history']:
            introduced_cast_list = self.logger.get_intoduced_list('cast', mid)
            cnt = 0
            cast_list_new = []
            for cast in cast_list:
                if cast not in introduced_cast_list:
                    cast_list_new.append(cast)
                    cnt += 1
                if cnt >= N:
                    break

            if len(cast_list) > len(introduced_cast_list) + N:
                state = 1
            else:
                state = 0
        else:
            cast_list_new = cast_list[:N]
            if len(cast_list) > N:
                state = 1
            else:
                state = 0

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='cast',
                         state=state,
                         type=type)

        id = self.logger.write(data_dict, slot, cast_list_new)

        output = {
            'person_list': cast_list_new,
            'topic': title,
            'mid': mid,
            'history': slot['history']
        }
        return output, id

    def get_director(self, slot, target, type='passive'):
        '''
        slot: dict
            key: title  movie title
        '''
        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        job_id = self.config['DM']['job_id_director']
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
            director_list = self.api.get_crew(mid, job_id)['name_ja'].tolist()
            if director_list[0] is None:
                director_list = self.api.get_crew(mid,
                                                  job_id)['name_en'].tolist()
        # If the title has not come up in the conversation yet
        else:
            df = self.api.search_movie_by_title(title)
            if len(df) > 0:
                movie_info = df.iloc[0]
                mid = movie_info['movie_id']
                director_list = self.api.get_crew(mid,
                                                  job_id)['name_ja'].tolist()
                if director_list[0] is None:
                    director_list = self.api.get_crew(
                        mid, job_id)['name_en'].tolist()
            else:
                director_list = []
                mid = -1

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='director',
                         type=type)

        id = self.logger.write(data_dict, slot, director_list)

        output = {'person_list': director_list, 'topic': title, 'mid': mid}
        return output, id

    def get_cast_detail(self, slot, target, type='passive'):
        '''
        slot: dict
            key: person   person name
            key: history  flag for "any others?" follow-up requests
        '''
        pid = self.api.person2id(slot['person'])

        if pid is not None:
            df = self.api.get_credit(pid)
            topic_list = df['title'].tolist()
            pron_list = df['pron'].tolist()
            N = int(self.config['DM']['cast_detail_num'])
            if slot['history']:
                introduced_cast_detail_list = self.logger.get_intoduced_list(
                    'cast_detail', pid)
                cnt = 0
                topic_list_new = []
                for i in range(len(topic_list)):
                    if pron_list[i] is not None:
                        topic = pron_list[i]
                    else:
                        topic = topic_list[i]
                    if topic not in introduced_cast_detail_list:
                        topic_list_new.append(topic)
                        cnt += 1
                    if cnt >= N:
                        break

                if len(topic_list) > len(introduced_cast_detail_list) + N:
                    state = 1
                else:
                    state = 0
            else:
                topic_list_new = topic_list[:N]
                if len(topic_list) > N:
                    state = 1
                else:
                    state = 0

        else:
            topic_list_new = []
            state = 0

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=pid,
                         command='cast_detail',
                         state=state,
                         type=type)

        id = self.logger.write(data_dict, slot, topic_list_new)

        topic = self.logger.get_topic_title()
        mid = self.logger.get_topic_mid()

        output = {'cast_detail': topic_list_new, 'topic': topic, 'mid': mid}
        print(output)
        return output, id

    def get_director_detail(self, slot, target, type='passive'):
        '''
        slot: dict
            key: person   person name
            key: history  flag for "any others?" follow-up requests
        '''
        pid = self.api.person2id(slot['person'])
        if pid is not None:
            df = self.api.get_credit(pid)
            topic_list = df['title'].tolist()

            N = int(self.config['DM']['director_detail_num'])
            if slot['history']:
                introduced_director_detail_list = self.logger.get_intoduced_list(
                    'director_detail', pid)
                cnt = 0
                topic_list_new = []
                for topic in topic_list:
                    if topic not in introduced_director_detail_list:
                        topic_list_new.append(topic)
                        cnt += 1
                    if cnt >= N:
                        break

                if len(topic_list) > len(introduced_director_detail_list) + N:
                    state = 1
                else:
                    state = 0
            else:
                topic_list_new = topic_list[:N]
                if len(topic_list) > N:
                    state = 1
                else:
                    state = 0
        else:
            topic_list_new = []
            state = 0

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=pid,
                         command='director_detail',
                         state=state,
                         type=type)
        id = self.logger.write(data_dict, slot, topic_list_new)

        topic = self.logger.get_topic_title()
        mid = self.logger.get_topic_mid()

        output = {
            'director_detail': topic_list_new,
            'topic': topic,
            'mid': mid
        }
        return output, id

    def get_tips(self, slot, target, type='passive'):
        '''
        slot: dict
            key: title    movie title
            key: tag      kind of information, e.g. overview/series
            key: history  flag for "any others?" follow-up requests
        '''

        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
            df_tips = self.api.get_tips(mid)
            if slot['tag'] is not None:
                tip_list = df_tips[df_tips['tag'] ==
                                   slot['tag']]['tips'].tolist()
            else:
                tip_list = df_tips['tips'].tolist()

        # If the title has not come up in the conversation yet
        else:
            df = self.api.search_movie_by_title(title)
            if len(df) > 0:
                movie_info = df.iloc[0]
                mid = movie_info['movie_id']
                df_tips = self.api.get_tips(mid)
                if slot['tag'] is not None:
                    tip_list = df_tips[df_tips['tag'] ==
                                       slot['tag']]['tips'].tolist()
                else:
                    tip_list = df_tips['tips'].tolist()
            else:
                df_tips = pd.DataFrame({})
                tip_list = []
                mid = -1

        # If history=True, consult the history
        if slot['history']:
            introduced_tips_list = self.logger.get_intoduced_list('tips', mid)
            output = None
            for tip in tip_list:
                if tip not in introduced_tips_list:
                    output = tip
                    break

            if len(df_tips) > len(introduced_tips_list) + 1:
                state = 1
            else:
                state = 0

        else:
            if len(df_tips) > 1:
                state = 1
                output = tip_list[0]
            elif len(df_tips) == 1:
                state = 0
                output = tip_list[0]
            else:
                state = 0
                output = None

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='tips',
                         state=state,
                         type=type)

        id = self.logger.write(data_dict, slot, [output])

        output = {'tips': output, 'topic': title, 'mid': mid}
        return output, id

    def get_review(self, slot, target, type='passive'):
        '''
        slot: dict
            key: title    movie title
            key: history  flag for "any others?" follow-up requests
        '''

        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
            review_list = self.api.get_review(mid)['review'].tolist()

        # If the title has not come up in the conversation yet
        else:
            df = self.api.search_movie_by_title(title)
            if len(df) > 0:
                movie_info = df.iloc[0]
                mid = movie_info['movie_id']
                review_list = self.api.get_review(mid)['review'].tolist()
            else:
                review_list = []
                mid = -1

        # If history=True, consult the history
        if slot['history']:
            introduced_review_list = self.logger.get_intoduced_list(
                'review', mid)
            output = None
            for review in review_list:
                if review not in introduced_review_list:
                    output = review
                    break

            if len(review_list) > len(introduced_review_list) + 1:
                state = 1
            else:
                state = 0

        else:
            if len(review_list) > 1:
                state = 1
                output = review_list[0]
            elif len(review_list) == 1:
                state = 0
                output = review_list[0]
            else:
                state = 0
                output = None

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='review',
                         state=state,
                         type=type)

        id = self.logger.write(data_dict, slot, [output])

        output = {'review': output, 'topic': title, 'mid': mid}
        return output, id

    def get_evaluation(self, slot, target, type='passive'):
        '''
        slot: dict
            key: title  movie title
        '''

        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
            df = self.api.search_movie_by_id(mid)
            if len(df) > 0:
                assert len(df) == 1, '1 id has multiple movies mid: {}'.format(
                    mid)
                score = df['evaluation'].iloc[0]
            else:
                score = None
                mid = -1
        # If the title has not come up in the conversation yet
        else:
            df = self.api.search_movie_by_title(title)
            if len(df) > 0:
                assert len(
                    df) == 1, '1 title has multiple movies mid: {}'.format(
                        title)
                score = df['evaluation'].iloc[0]
                mid = df['movie_id'].iloc[0]
            else:
                score = None
                mid = -1

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='evaluation',
                         type=type)

        id = self.logger.write(data_dict, slot, [score])

        output = {'evaluation': score, 'topic': title, 'mid': mid}
        return output, id

    def get_genres(self, slot, target, type='passive'):
        '''
        slot: dict
            key: title  movie title
        '''

        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
            genre_list = self.api.get_genre(mid)['genre_id'].tolist()
        # If the title has not come up in the conversation yet
        else:
            df = self.api.search_movie_by_title(title)
            if len(df) > 0:
                movie_info = df.iloc[0]
                mid = movie_info['movie_id']
                genre_list = self.api.get_genre(mid)['genre_id'].tolist()
            else:
                genre_list = []
                mid = -1

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='genre',
                         type=type)

        id = self.logger.write(data_dict, slot, genre_list)

        genres = [self.api.id2genre(gid) for gid in genre_list]
        output = {'genres': genres, 'topic': title, 'mid': mid}
        return output, id

    def get_question(self, slot, target, type='active'):
        '''
        slot: dict
            key: title  movie title
        '''

        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
        # If the title has not come up in the conversation yet
        else:
            mid = None

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='question',
                         type=type)

        id = self.logger.write(data_dict, slot, [])
        output = {'topic': title, 'mid': mid}
        return output, id

    def get_title(self, slot, target, type='correction'):
        '''
        slot: dict
            key: title  movie title
        '''

        title = slot['title']
        title_list = self.logger.get_topic_history()
        mid_list = self.logger.get_mid_history()
        # If the title has already come up in the conversation
        if title in title_list:
            mid = mid_list[title_list.index(title)]
        # If the title has not come up in the conversation yet
        else:
            mid = None

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='title',
                         type=type)

        id = self.logger.write(data_dict, slot, [])
        output = {'topic': title, 'mid': mid}
        return output, id

    def get_repeat(self, slot, target, type='correction'):
        '''
        slot: dict
            key:
        '''

        mid_list = self.logger.get_mid_history()
        if len(mid_list) > 0:
            mid = mid_list[-1]
        else:
            mid = None

        # Record this turn in the dialogue history
        data_dict = self.logger.get_main_data_dict()
        data_dict.update(action='utter',
                         target=target,
                         topic=mid,
                         command='repeat',
                         type=type)

        id = self.logger.write(data_dict, slot, [])
        output = self.cash_output
        output["command"] = self.cash_command
        output["slot"] = self.cash_slot
        return output, id

    def main(self, command, slot, target, type='passive'):
        '''
        Consult the DB and the dialogue history to decide what information to pass to the NLG,
        and generate an utterance that matches the conditions.
        Parameters
        ------------
        command: str
          the command, e.g. recommend, person, overview
        slot: dict
          the command's arguments, e.g. recommend -> {"genre": _, "actor": _, "director": _}
        ------------

        Kinds of command
        1. recommend        slot: {"genre": _, "person": _, "sort_by": _, "history": _}, "Then how about {topic}?"
        2. cast             slot: {"title": _, "job": _, "history": _}, "{person} appears in it"
        3. director         slot: {"title": _, "job": _},  "The director is {person}"
        4. cast_detail      slot: {"person_name", "history": _}, "They appear in ... and ..."
        5. director_detail  slot: {"person_name", "history": _}, "They directed ... and ..."
        6. tips             slot: {"title": _, "history": _}, "It is the Y-th film in the XX series"
        7. review           slot: {"title": _, "history": _}, "People said it was ..."
        8. evaluation       slot: {"title": _}, "Its rating is {score} points"
        9. genre            slot: {"title": _}, "It is {genre}"
        10. detail_active   slot: None, pick one of 2, 3, 7-10
        11. pardon          slot: None, "Could you say that one more time?"
        12. unknown         slot: None, "I'm not really sure about that"
        13. start           slot: None, "What kind of movie would you like to see?"
        14. end             slot: None, "Enjoy the movie!"
        15. yes             slot: None, "Yes, that's right"
        16. no              slot: None, "No, that's not it"
        17. question        slot: None, "Are you interested in XX?"
        18. summarize       slot: None, "Have you decided which movie to go see?"
        '''

        # Save the command to the history
        if 'recommendation' in command:
            output, id = self.get_recommendation(slot, target, type)

        elif command == 'cast':
            output, id = self.get_cast(slot, target, type)

        elif command == 'director':
            output, id = self.get_director(slot, target, type)

        elif command == 'cast_detail':
            output, id = self.get_cast_detail(slot, target, type)

        elif command == 'director_detail':
            output, id = self.get_director_detail(slot, target, type)

        elif command == 'tips':
            output, id = self.get_tips(slot, target, type)

        elif command == 'review':
            output, id = self.get_review(slot, target, type)

        elif command == 'evaluation':
            output, id = self.get_evaluation(slot, target, type)

        elif command == 'genre':
            output, id = self.get_genres(slot, target, type)

        elif command == 'question':
            output, id = self.get_question(slot, target, type)

        elif command == 'title':
            output, id = self.get_title(slot, target, type)

        elif command == 'repeat':
            output, id = self.get_repeat(slot, target, type)

        elif command in ["yes", "no", "unknown", "start", "end", "summarize"]:
            # Record this turn in the dialogue history
            data_dict = self.logger.get_main_data_dict()
            data_dict.update(action='utter',
                             target=target,
                             command=command,
                             type=type)
            id = self.logger.write(data_dict, slot, [])
            output = {}

        else:
            id = None
            output = {}

        self.cash_output = output
        self.cash_command = command
        self.cash_slot = slot

        return output, id
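main simply dispatches on command, writes the turn to the dialogue history via LogManager, and caches the output so a later 'repeat' can replay it. A minimal invocation sketch; load_config, the slot values, and the target are hypothetical stand-ins for whatever this project actually passes in:

config = load_config()  # hypothetical: however the project builds its config
dm = DM(config)
slot = {'genre': None, 'person': None, 'sort_by': None, 'history': False}  # hypothetical slot
output, log_id = dm.main('recommendation', slot, target='user', type='passive')
print(output.get('topic'), output.get('mid'))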
Code example #32
File: repository.py  Project: haimroizman/chat
 def __init__(self, section):
     self.chat_db_config = self.read_db_config(section)  # Read the database connection settings
     self.logger = LogManager("chatApp")
Code example #33
same time
"""
import datetime
import json
import os
import sys

import requests
from lxml import html
from selenium import webdriver

from downloader import downloader
from log_manager import LogManager
from db.data_ops import DAO

logger = LogManager.getLogger('scraper')
# dao = DAO()

# Assuming `prev.json` is not in use by another process.
resources = os.path.join(
    os.path.abspath(os.path.join(__file__, os.path.pardir)), 'resources')

prev_file = os.path.join(resources, 'prev.json')
with open(prev_file, 'r', encoding='utf-8') as f:
    prev = json.load(f)
# prev = dao.get_to_be_downloaded()
logger.info("Loaded the want-to-download anime list")

home = os.path.expanduser("~")
driver = webdriver.Chrome(os.path.join(home, 'locallib/chromedriver'))
driver.maximize_window()
Code example #34
File: main.py  Project: a740122/BT-Share
import os
import hashlib
import requests
import socket
from requests.exceptions import RequestException, Timeout
from pymongo import Connection

from config import MONGO, UPDATE_NUM, REQUEST_TIMEOUT
from torrentparser import TorrentParser, ParsingError
from log_manager import LogManager

# TODO single file
mongo = Connection(host=MONGO['host'], port=MONGO['port'])
db = mongo[MONGO['db']]

logger = LogManager("application.log")

# TODO implement it in C
tp = TorrentParser.get_instance()


#parse torrent file
def parse_torrent_file(file):
    try:
        tp.parse_torrent(file)
        return {
            "name": tp.get_torrent_name(),
            "file_list": tp.get_files_details(),
            "creation_date": tp.get_creation_date(),
            "tracker_url": tp.get_tracker_url(),
            "client_name": tp.get_client_name(),
Code example #35
def power_states_activity(m: LogManager):
    timestamp = datetime.utcnow()
    m.clear_state(timestamp)

    context = device_context(timestamp)
    context.rcu_version = "** SCENARIO: POWER STATES **"
    m.push_event(context)

    power_on = PowerStatusEvent(timestamp, PowerStateType.POWER_ON, False)
    m.push_event(power_on)

    timestamp += timedelta(seconds=1)
    display_on = VideoOutputEvent(timestamp, True, '', '1.x', '1.x', '1080i',
                                  '50', '2b9c5d351a879a25b86851adc36acea6')
    m.push_event(display_on)

    timestamp += timedelta(seconds=1)
    player = PageViewEvent(timestamp, 'player')
    m.push_event(player)

    timestamp += timedelta(seconds=1)
    viewing_start = timestamp
    viewing_dur = 60
    event = get_live_event()
    view_1 = make_live_play_event(timestamp, event)
    m.push_event(view_1)

    timestamp += timedelta(seconds=viewing_dur)
    stop_1 = make_live_stop_event(timestamp, viewing_start, event, viewing_dur)
    m.push_event(stop_1)

    standby = PageViewEvent(timestamp, 'standby')
    m.push_event(standby)

    power_standby = PowerStatusEvent(timestamp, PowerStateType.STANDBY, True)
    m.push_event(power_standby)

    display_off = VideoOutputEvent(timestamp, False)
    m.push_event(display_off)

    timestamp += timedelta(hours=8)
    active = PowerStatusEvent(timestamp, PowerStateType.ACTIVE, True)
    m.push_event(active)

    timestamp += timedelta(seconds=1)
    display_on.timestamp = timestamp
    m.push_event(display_on)

    timestamp += timedelta(seconds=1)
    player.timestamp = timestamp
    m.push_event(player)
Code example #36
def channel_surfing(m: LogManager):
    timestamp = datetime.utcnow()
    m.clear_state(timestamp)

    context = device_context(timestamp)
    context.rcu_version = "** SCENARIO: CHANNEL SURFING **"
    m.push_event(context)

    player = PageViewEvent(timestamp, 'player')
    m.push_event(player)

    viewing_start = timestamp
    f8d_events = request_channel_events('F8D')
    shc_events = request_channel_events('SHC')
    sha_events = request_channel_events('SHA')

    event = find_event_on_air(timestamp, f8d_events)
    view_1 = make_live_play_event(timestamp, event)
    m.push_event(view_1)

    timestamp = event.start_time + event.duration
    event = live_event_change(m, timestamp, event,
                              find_event_on_air(timestamp, f8d_events),
                              viewing_start)
    viewing_start = timestamp

    timestamp += timedelta(seconds=90)
    mini_guide = PageViewEvent(timestamp, 'miniGuide')
    m.push_event(mini_guide)

    timestamp += timedelta(seconds=5)
    event = live_event_change(m, timestamp, event,
                              find_event_on_air(timestamp, shc_events),
                              viewing_start)
    viewing_start = timestamp

    timestamp += timedelta(seconds=5)
    live_event_change(m, timestamp, event,
                      find_event_on_air(timestamp, sha_events), viewing_start)

    timestamp += timedelta(seconds=5)
    player = PageViewEvent(timestamp, 'player')
    m.push_event(player)
Code example #37
File: downloader.py  Project: abhiroyg/scrape-anime
from clint.textui import progress
from lxml import html
import notify2
import requests
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from urllib.parse import unquote

from log_manager import LogManager


logger = LogManager.getLogger('downloader')

def get_embedded_video_links(gogoanime_episode_url):
    """
    TODO:
    When we are downloading a series,
    Common url is:
        http://www.gogoanime.com/battery-episode-3
    But, some episodes will have urls like
        http://www.gogoanime.com/battery-episode-3-episode-3
    So, if this function cannot find videos
    at the common url, it goes to the previous page
    (if it exists) and clicks <Next episode>.
    Sometimes <Next episode> leads to a `movie/special/ova/ona`
    page, so it skips those and gets the correct url.
Code example #38
File: target.py  Project: yue/build-gn
class Target(object):
  """Base class representing a Fuchsia deployment target."""

  def __init__(self, out_dir, target_cpu, logs_dir):
    self._out_dir = out_dir
    self._target_cpu = target_cpu
    self._command_runner = None
    self._symbolizer_proc = None
    self._log_listener_proc = None
    self._dry_run = False
    self._started = False
    self._log_manager = LogManager(logs_dir)
    self._ffx_runner = ffx_session.FfxRunner(self._log_manager)

  @staticmethod
  def CreateFromArgs(args):
    raise NotImplementedError()

  @staticmethod
  def RegisterArgs(arg_parser):
    pass

  # Functions used by the Python context manager for teardown.
  def __enter__(self):
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    try:
      self.Stop()
    finally:
      # Stop the ffx daemon, since the target device is going / has gone away.
      # This ensures that the daemon does not become "hung" if the target device
      # stops responding to network I/O (e.g., due to emulator instance
      # teardown). The daemon will be automatically restarted by the next `ffx`
      # call.
      self._ffx_runner.daemon_stop()
      # Stop the log manager only after the last use of _ffx_runner.
      self._log_manager.Stop()

  def Start(self):
    """Handles the instantiation and connection process for the Fuchsia
    target instance."""
    raise NotImplementedError()

  def IsStarted(self):
    """Returns True if the Fuchsia target instance is ready to accept
    commands."""
    return self._started

  def GetFfxTarget(self):
    """Returns the FfxTarget instance to use to interact with this target."""
    raise NotImplementedError()

  def Stop(self):
    """Stop all subprocesses and close log streams."""
    if self._symbolizer_proc:
      self._symbolizer_proc.kill()
    if self._log_listener_proc:
      self._log_listener_proc.kill()

  def IsNewInstance(self):
    """Returns True if the connected target instance is newly provisioned."""
    return True

  def GetCommandRunner(self):
    """Returns CommandRunner that can be used to execute commands on the
    target. Most clients should prefer RunCommandPiped() and RunCommand()."""
    self._AssertIsStarted()

    if self._command_runner is None:
      host, port = self._GetEndpoint()
      self._command_runner = \
          remote_cmd.CommandRunner(self._GetSshConfigPath(), host, port)

    return self._command_runner

  def StartSystemLog(self, package_paths):
    """Start a system log reader as a long-running SSH task."""
    system_log = self._log_manager.Open('system_log')
    if package_paths:
      self._log_listener_proc = self.RunCommandPiped(['log_listener'],
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.STDOUT)
      self._symbolizer_proc = RunSymbolizer(self._log_listener_proc.stdout,
                                            system_log,
                                            BuildIdsPaths(package_paths))
    else:
      self._log_listener_proc = self.RunCommandPiped(['log_listener'],
                                                     stdout=system_log,
                                                     stderr=subprocess.STDOUT)

  def RunCommandPiped(self, command, **kwargs):
    """Starts a remote command and immediately returns a Popen object for the
    command. The caller may interact with the streams, inspect the status code,
    wait on command termination, etc.

    command: A list of strings representing the command and arguments.
    kwargs: A dictionary of parameters to be passed to subprocess.Popen().
            The parameters can be used to override stdin and stdout, for
            example.

    Returns: a Popen object.

    Note: method does not block.
    """
    logging.debug('running (non-blocking) \'%s\'.', ' '.join(command))
    return self.GetCommandRunner().RunCommandPiped(command, **kwargs)
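  # Illustrative use of RunCommandPiped (assumes a started target instance):
  #   proc = target.RunCommandPiped(['ls', '/tmp'], stdout=subprocess.PIPE)
  #   stdout, _ = proc.communicate()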

  def RunCommand(self, command, silent=False, timeout_secs=None):
    """Executes a remote command and waits for it to finish executing.

    Returns the exit code of the command.
    """
    logging.debug('running \'%s\'.', ' '.join(command))
    return self.GetCommandRunner().RunCommand(command, silent,
                                              timeout_secs=timeout_secs)

  def EnsureIsolatedPathsExist(self, for_package, for_realms):
    """Ensures that the package's isolated /data and /tmp exist."""
    for isolated_directory in ['/data', '/tmp']:
      self.RunCommand([
          'mkdir', '-p',
          _MapIsolatedPathsForPackage(for_package, 0,
                                      for_realms)(isolated_directory)
      ])

  def PutFile(self,
              source,
              dest,
              recursive=False,
              for_package=None,
              for_realms=()):
    """Copies a file from the local filesystem to the target filesystem.

    source: The path of the file being copied.
    dest: The path on the remote filesystem which will be copied to.
    recursive: If true, performs a recursive copy.
    for_package: If specified, isolated paths in the |dest| are mapped to their
                 absolute paths for the package, on the target. This currently
                 affects the /data and /tmp directories.
    for_realms: If specified, identifies the sub-realm of 'sys' under which
                isolated paths (see |for_package|) are stored.
    """
    assert type(source) is str
    self.PutFiles([source], dest, recursive, for_package, for_realms)

  def PutFiles(self,
               sources,
               dest,
               recursive=False,
               for_package=None,
               for_realms=()):
    """Copies files from the local filesystem to the target filesystem.

    sources: List of local file paths to copy from.
    dest: The path on the remote filesystem which will be copied to.
    recursive: If true, performs a recursive copy.
    for_package: If specified, /data in the |dest| is mapped to the package's
                 isolated /data location.
    for_realms: If specified, identifies the sub-realm of 'sys' under which
                isolated paths (see |for_package|) are stored.
    """
    assert type(sources) is tuple or type(sources) is list
    if for_package:
      self.EnsureIsolatedPathsExist(for_package, for_realms)
      dest = _MapIsolatedPathsForPackage(for_package, 0, for_realms)(dest)
    logging.debug('copy local:%s => remote:%s', sources, dest)
    self.GetCommandRunner().RunScp(sources, dest, remote_cmd.COPY_TO_TARGET,
                                   recursive)
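  # Illustrative use of PutFiles with isolated-path mapping (paths and the
  # package name are examples only):
  #   target.PutFiles(['out/config.json'], '/data', for_package='my_package')
  #   # '/data' above is rewritten to my_package's isolated /data directory.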

  def GetFile(self,
              source,
              dest,
              for_package=None,
              for_realms=(),
              recursive=False):
    """Copies a file from the target filesystem to the local filesystem.

    source: The path of the file being copied.
    dest: The path on the local filesystem which will be copied to.
    for_package: If specified, /data in the |source| path is mapped to the
                 package's isolated /data location.
    for_realms: If specified, identifies the sub-realm of 'sys' under which
                isolated paths (see |for_package|) are stored.
    recursive: If true, performs a recursive copy.
    """
    assert type(source) is str
    self.GetFiles([source], dest, for_package, for_realms, recursive)

  def GetFiles(self,
               sources,
               dest,
               for_package=None,
               for_realms=(),
               recursive=False):
    """Copies files from the target filesystem to the local filesystem.

    sources: List of remote file paths to copy.
    dest: The path on the local filesystem which will be copied to.
    for_package: If specified, /data in paths in |sources| is mapped to the
                 package's isolated /data location.
    for_realms: If specified, identifies the sub-realm of 'sys' under which
                isolated paths (see |for_package|) are stored.
    recursive: If true, performs a recursive copy.
    """
    assert type(sources) is tuple or type(sources) is list
    self._AssertIsStarted()
    if for_package:
      sources = map(_MapIsolatedPathsForPackage(for_package, 0, for_realms),
                    sources)
    logging.debug('copy remote:%s => local:%s', sources, dest)
    return self.GetCommandRunner().RunScp(sources, dest,
                                          remote_cmd.COPY_FROM_TARGET,
                                          recursive)

  def GetFileAsString(self, source):
    """Reads a file on the device and returns it as a string.

    source: The remote file path to read.
    """
    cat_proc = self.RunCommandPiped(['cat', source],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
    stdout, _ = cat_proc.communicate()
    if cat_proc.returncode != 0:
      raise Exception('Could not read file %s on device.' % source)
    return stdout.decode('utf-8')

  def _GetEndpoint(self):
    """Returns a (host, port) tuple for the SSH connection to the target."""
    raise NotImplementedError()

  def _GetTargetSdkArch(self):
    """Returns the Fuchsia SDK architecture name for the target CPU."""
    if self._target_cpu == 'arm64' or self._target_cpu == 'x64':
      return self._target_cpu
    raise FuchsiaTargetException('Unknown target_cpu:' + self._target_cpu)

  def _AssertIsStarted(self):
    assert self.IsStarted()

  def _ConnectToTarget(self):
    logging.info('Connecting to Fuchsia using SSH.')

    host, port = self._GetEndpoint()
    end_time = time.time() + common.ATTACH_RETRY_SECONDS
    ssh_diagnostic_log = self._log_manager.Open('ssh_diagnostic_log')
    while time.time() < end_time:
      runner = remote_cmd.CommandRunner(self._GetSshConfigPath(), host, port)
      ssh_proc = runner.RunCommandPiped(['true'],
                                        ssh_args=['-v'],
                                        stdout=ssh_diagnostic_log,
                                        stderr=subprocess.STDOUT)
      if ssh_proc.wait() == 0:
        logging.info('Connected!')
        self._started = True
        self._command_runner = runner
        return True
      time.sleep(_ATTACH_RETRY_INTERVAL)

    logging.error('Timeout limit reached.')

    raise FuchsiaTargetException('Couldn\'t connect using SSH.')

  def _DisconnectFromTarget(self):
    pass

  def _GetSshConfigPath(self):
    raise NotImplementedError()

  def GetPkgRepo(self):
    """Returns an PkgRepo instance which serves packages for this Target.
    Callers should typically call GetPkgRepo() in a |with| statement, and
    install and execute commands inside the |with| block, so that the returned
    PkgRepo can teardown correctly, if necessary.
    """
    raise NotImplementedError()
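  # Illustrative use of GetPkgRepo (see InstallPackage below for a real call
  # site; the .far path is an example only):
  #   with target.GetPkgRepo() as pkg_repo:
  #     pkg_repo.PublishPackage('path/to/example_package.far')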

  def InstallPackage(self, package_paths):
    """Installs a package and it's dependencies on the device. If the package is
    already installed then it will be updated to the new version.

    package_paths: Paths to the .far files to install.
    """
    with self.GetPkgRepo() as pkg_repo:
      # Publish all packages to the serving TUF repository under |tuf_root|.
      for package_path in package_paths:
        pkg_repo.PublishPackage(package_path)

      # Resolve all packages, to have them pulled into the device/VM cache.
      for package_path in package_paths:
        package_name, package_version = _GetPackageInfo(package_path)
        logging.info('Installing %s...', package_name)
        return_code = self.RunCommand(
            ['pkgctl', 'resolve',
             _GetPackageUri(package_name), '>/dev/null'],
            timeout_secs=_INSTALL_TIMEOUT_SECS)
        if return_code != 0:
          raise Exception(
              'Error {} while resolving {}.'.format(return_code, package_name))

      # Verify that the newly resolved versions of packages are reported.
      for package_path in package_paths:
        # Use pkgctl get-hash to determine which version will be resolved.
        package_name, package_version = _GetPackageInfo(package_path)
        pkgctl = self.RunCommandPiped(
            ['pkgctl', 'get-hash',
             _GetPackageUri(package_name)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        pkgctl_out, pkgctl_err = pkgctl.communicate()

        # Read the expected version from the meta.far Merkle hash file alongside
        # the package's FAR.
        meta_far_path = os.path.join(os.path.dirname(package_path), 'meta.far')
        meta_far_merkel = subprocess.check_output(
            [common.GetHostToolPathFromPlatform('merkleroot'),
             meta_far_path]).split()[0]
        if pkgctl_out.strip() != meta_far_merkel:
          raise Exception('Hash mismatch for %s after resolve (%s vs %s).' %
                          (package_name, pkgctl_out, meta_far_merkel))

  def RunFFXCommand(self, ffx_args):
    """Automatically gets the FFX path and runs FFX based on the
    arguments provided.

    Args:
      ffx_args: The arguments for a ffx command.

    Returns:
      A Popen object for the command.
    """
    return self._ffx_runner.open_ffx(ffx_args)
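  # Illustrative use of RunFFXCommand (the ffx arguments are examples only):
  #   proc = target.RunFFXCommand(['target', 'list'])
  #   proc.wait()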