Example #1
    def __init__(self, material, thickness, flipped=False):
        super().__init__()
        self._cuts = Paths()
        self._lines = Paths()
        self._regions = []
        self._material = material
        self._thickness = thickness
        self._flipped = flipped
Example #2
def export2txt_words4exam(words4exam: List[str]):
    """TODO Save the word list so that duplicate IT terms across consecutive days can be analyzed and avoided."""

    dirpath = Paths().DIR_words4analyze
    # Create the output directory if it does not exist yet
    make_newdir(dirpath)

    # Save the word list under today's date
    newfile = Paths().gen_FILE_WORD_LIST
    shutil.copy(Paths().PATH_template_1st, dirpath + newfile)
Example #3
def upload2slack(dirpath: str, newfile: str):
    """Send the generated file to the designated Slack channel."""
    # Validate the inputs
    if dirpath is None:
        # Fall back to the default directory
        dirpath = Paths().DIR_exam_papers

    if newfile is None:
        newfile = Paths().FILE_EXAM_PAPER
        # Look for today's test file and exit abnormally if it does not exist
        if not os.path.exists(dirpath + newfile):
            print(' Today\'s file not found: ')
            sys.exit(1)

    URL_UPLOAD = "https://slack.com/api/files.upload"

    with open(dirpath + newfile, 'rb') as f:
        files = {'file': f.read()}
        p = {
            'token': env.TOKEN,
            'channels': env.CHANNEL_ID,
            'filename': newfile,
            'filetype': 'md',
            'initial_comment': "―[Instructions]―――――――――――――――――\
                                \n *After downloading the attached file, do the following and upload it again.*\
                                \n  1. Write your answers in the answer fields\
                                \n  2. Save the file when you are done\
                                \n  3. Rename the file (\"~_name.md\"  ← replace name)\
                                \n\
                                \n―[How to answer]―――――――――――――――\
                                \n *Write sentences using the word list on the answer sheet until you reach 10P in total.*\
                                \n  ・Look up the meanings yourself, using the sites given in the reference links.\
                                \n  ・Everything up to a period counts as one sentence.\
                                \n  ・Points per sentence depend on how many terms it uses.\
                                \n   - 1 term only : 1P UP↑\
                                \n   - 2 terms   : 3P UP↑\
                                \n   - 3 or more terms : 5P UP↑\
                                \n  ・Any number of terms from the word list may be used, as long as each sentence uses at least one.\
                                \n  ・If a term is repeated across sentences, those sentences are invalid.\
                                \n ",
            'title': "Answer sheet_IT terminology test",
        }
        r = requests.post(url=URL_UPLOAD, params=p, files=files)

    if r.status_code == 200:
        print(' Uploaded.')
    else:
        print(' Upload_failed: ', r)

    return
Example #4
    def __init__(self, name, dir_path=None):

        self._logger = Logger("Fixture {0}".format(name))

        path_name = Paths().get_fixture_path(name, only_name=True)

        if dir_path:
            conf_rel_path = Paths().fixture_conf_file_rel_pattern.format(path_name)
            self._conf_path = os.path.join(dir_path, conf_rel_path)
        else:
            self._conf_path = Paths().get_fixture_path(name)

        self.model = self._load_model(name)
        self.history = grid_history.GriddingHistory(self)
Example #5
    def __init__(self):

        self.paths = Paths()

        self.data_conf = ConfigParser.SafeConfigParser()

        self.read()
Example #6
    def __init__(self):
        """
        Initialize the topology with an empty graph and set of paths
        """
        self.G = nx.Graph()
        self.paths = Paths(self.G)
        self.reset()
        self.hosts = dict()
        self.flows = dict()
Example #7
def export2md_paper(words4exam: List[str]):
    dirpath = Paths().DIR_exam_papers
    # Create the output directory if it does not exist yet
    make_newdir(dirpath)

    # Copy the template and save it as a separate file under today's date
    newfile = Paths().gen_FILE_EXAM_PAPER()
    shutil.copy(Paths().PATH_template_1st, dirpath + newfile)

    # Write the terms received as the argument into the word-list section
    with open(dirpath + newfile, 'a', encoding='utf-8') as fst:
        for word in words4exam:
            fst.write('- ' + word + '\n')

        with open(Paths().PATH_template_2nd, 'r', encoding='utf-8') as snd:
            read_data = snd.read()
            fst.write(read_data)

    print(' Exported.')

    upload2slack(dirpath, newfile)
    return
Example #8
    def fill_model(self, model):

        fixture_name = model['fixture']
        if fixture_name in self:
            fixture = self[fixture_name]
            model['im-original-scale'] = fixture.model.scale
            model['fixture-file'] = fixture.path

        else:

            model['im-original-scale'] = 1.0
            model['im-scale'] = 1.0
            model['fixture-file'] = Paths().get_fixture_path(model['fixture'], only_name=True)
Example #9
    def update(self):

        directory = Paths().fixtures
        extension = ".config"

        list_fixtures = map(lambda x: x.split(extension, 1)[0],
                            [fixture for fixture in os.listdir(directory)
                                if fixture.lower().endswith(extension)])

        self._fixtures = dict()

        for f in list_fixtures:
            if f.lower() != "fixture":
                fixture = FixtureSettings(f, directory)
                self._fixtures[fixture.model.name] = fixture
Example #10
def v_slide(params):
    """Slide a 299x299 window down one column of the slide and queue tiles
    whose green-channel average suggests tissue."""
    paths = Paths()
    try:
        scn_file = OpenSlide(paths.slice_80)
    except OpenSlideUnsupportedFormatError:
        logging.error("OpenSlideUnsupportedFormatError!")
        return
    except OpenSlideError:
        logging.error("OpenSlideError!")
        return

    try:
        start_point = params["start_point"]
        x0 = start_point[0]
        y0 = start_point[1]
        bound_y = params["bound_y"]
        tile_path = params["tile_path"]
        save_tiles = params["save_tiles"]
        q = params["queue"]

        AVG_THRESHOLD = 170
        pid = os.getpid()
        data = {}
        while y0 < bound_y:
            img = scn_file.read_region((x0, y0), 0, (299, 299))
            green_c_avg = np.average(np.array(img)[:, :, 1])
            if green_c_avg < AVG_THRESHOLD:
                suffix = "_" + str(x0) + "_" + str(y0) + ".png"
                file_name = "scn80" + suffix
                if save_tiles:
                    # save the PIL image before it is converted to an array
                    img.save(os.path.join(tile_path, file_name))
                img = np.array(img)
                img = img[:, :, 0:3]
                data['pred'] = img
                data['xlabel'] = np.array([x0])
                data['ylabel'] = np.array([y0])
                q.put(dict(data))

            y0 += 150

        return pid
    finally:
        scn_file.close()
Example #11
    def __init__(self, conf, actions_options, time_units, edit):

        wx.Dialog.__init__(self, None, title=_('Add action'), size=(330, 290))

        panel = wx.Panel(self)
        self.conf = conf
        self.actions_options = actions_options

        list_actions = []
        for i in self.actions_options:
            list_actions.append(i[0])

        wx.StaticText(panel, label=_('action'), pos=(10, 10))
        self.action_select = wx.ComboBox(panel,
                                         choices=list_actions,
                                         style=wx.CB_READONLY,
                                         size=(310, 32),
                                         pos=(10, 35))
        self.action_select.Bind(wx.EVT_COMBOBOX, self.onSelect)
        wx.StaticText(panel, label=_('data'), pos=(10, 70))
        self.data = wx.TextCtrl(panel, size=(310, 32), pos=(10, 95))
        wx.StaticText(panel, label=_('repeat after'), pos=(10, 130))
        self.repeat = wx.TextCtrl(panel, size=(150, 32), pos=(10, 155))
        self.repeat.Disable()
        self.repeat_unit = wx.ComboBox(panel,
                                       choices=time_units,
                                       style=wx.CB_READONLY,
                                       size=(150, 32),
                                       pos=(170, 155))
        self.repeat_unit.Bind(wx.EVT_COMBOBOX, self.onSelectUnit)
        self.repeat_unit.SetValue(_('no repeat'))

        if edit != 0:
            self.action_select.SetValue(list_actions[edit[1]])
            self.data.SetValue(edit[2])
            if edit[3] != 0.0:
                self.repeat.SetValue(str(edit[3]))
                self.repeat.Enable()
            self.repeat_unit.SetValue(time_units[edit[4]])

        cancelBtn = wx.Button(panel, wx.ID_CANCEL, pos=(70, 205))
        okBtn = wx.Button(panel, wx.ID_OK, pos=(180, 205))

        paths = Paths()
        self.home = paths.home
        self.currentpath = paths.currentpath
Example #12
	def __init__(self, language):

		paths = Paths()

		gettext.install('openplotter', paths.currentpath+'/locale', unicode=False)
		translations = {
			lang: gettext.translation('openplotter', paths.currentpath+'/locale', languages=[lang])
			for lang in ['en', 'ca', 'es', 'fr', 'nl', 'de']
		}
		if language in translations: translations[language].install()
Example #13
def main():
    args = parse_args()

    paths = Paths()
    checkpoints_path = str(paths.CHECKPOINTS_PATH)
    logging_path = str(paths.LOG_PATH)

    callbacks = [PrintCallback()]
    checkpoint_callback = ModelCheckpoint(filepath=checkpoints_path +
                                          '/{epoch}-{val_acc:.3f}',
                                          save_top_k=True,
                                          verbose=True,
                                          monitor='val_acc',
                                          mode='max',
                                          prefix='')
    early_stop_callback = EarlyStopping(monitor='val_acc',
                                        mode='max',
                                        verbose=False,
                                        strict=False,
                                        min_delta=0.0,
                                        patience=2)
    gpus = gpu_count()
    log_save_interval = args.log_save_interval
    logger = TensorBoardLogger(save_dir=logging_path, name='tuna-log')
    logger.log_hyperparams(args)
    max_epochs = args.epochs

    model = LeNet(hparams=args, paths=paths)
    trainer = Trainer(
        callbacks=callbacks,
        checkpoint_callback=checkpoint_callback,
        early_stop_callback=early_stop_callback,
        fast_dev_run=True,
        gpus=gpus,
        log_save_interval=log_save_interval,
        logger=logger,
        max_epochs=max_epochs,
        min_epochs=1,
        show_progress_bar=True,
        weights_summary='full',
    )
    trainer.fit(model)
Example #14
    def get_marker_path(self):

        paths = Paths()

        if self.model.orentation_mark_path:
            marker_paths = (self.model.orentation_mark_path,
                            os.path.join(paths.images, os.path.basename(self.model.orentation_mark_path)),
                            paths.marker)
        else:
            marker_paths = (paths.marker,)

        for path in marker_paths:

            try:

                with open(path, 'rb') as _:
                    self._logger.info("Using marker at '{0}'".format(path))
                    return path
            except IOError:
                self._logger.warning("The designated orientation marker file does not exist ({0})".format(path))

        return None
Example #15
    def add_path(self, aper, *path):

        if self._expansion > 0.0:
            if isinstance(aper, tuple):
                s = gerbertools.Shape(1e6)
                s.append_int(aper)
                s = s.offset(self._expansion, True)
                assert len(s) == 1
                aper = tuple(s.get_int(0))
            else:
                aper += from_mm(self._expansion)

        paths = self._paths.get(aper, None)

        # Due to roundoff error during rotation, some almost-identical
        # (actually identical in the gerber file) apertures can appear
        # for region apertures. To avoid this, look for apertures that
        # are "close enough".
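        # For example, two otherwise-identical rotated apertures whose
        # coordinates differ slightly accumulate err as a sum of squared
        # deltas; the cutoff of 100 admits about 10 fixed-point units of
        # total deviation.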
        if paths is None and isinstance(aper, tuple):
            for ap2 in self._paths:
                if not isinstance(ap2, tuple):
                    continue
                if len(aper) != len(ap2):
                    continue
                err = 0
                for c1, c2 in zip(aper, ap2):
                    err += (c1[0] - c2[0])**2
                    err += (c1[1] - c2[1])**2
                    if err > 100:
                        break
                else:
                    aper = ap2
                    paths = self._paths[aper]
                    break

        if paths is None:
            paths = Paths()
            self._paths[aper] = paths
        paths.add(*path)
Example #16
    def _configure(self):
        #################################
        # Global logging level
        #################################
        p = self.p
        u.verbose.set_level(p.verbose_level)

        #################################
        # Global data type switch
        #################################

        self.data_type = p.data_type
        assert p.data_type in ['single', 'double']
        self.FType = np.dtype(
            'f' + str(np.dtype(np.typeDict[p.data_type]).itemsize)).type
        self.CType = np.dtype(
            'c' + str(2 * np.dtype(np.typeDict[p.data_type]).itemsize)).type
        logger.info(_('Data type', self.data_type))

        #################################
        # Prepare interaction server
        #################################
        if parallel.master:
            # Create the interaction server
            self.interactor = interaction.Server(p.interaction)

            # Start the thread
            self.interactor.activate()

            # Register self as an accessible object for the client
            self.interactor.objects['Ptycho'] = self

        # Check if there is already a runtime container
        if not hasattr(self, 'runtime'):
            self.runtime = u.Param()

        # Generate all the paths
        self.paths = Paths(self.p.paths, self.runtime)
Example #17
import json
import math
import ntpath
import os
import sys

from lxml import etree  # ElementTree would also work for the parsing below
# Paths comes from this project's own helper module

# read the latency JSON
with open(sys.argv[1]) as lat_json:
    latList = json.load(lat_json)

# see if reconfiguration is enabled
if sys.argv[2] == '1':
    reconfig = True
else:
    reconfig = False

projectname = ntpath.basename(sys.argv[3])
projectname = os.path.splitext(projectname)[0]
p = Paths(projectname)
xml_path = p.TMP_BUILD_PATH
#list of xml objects
xmlList = []
for modeInd in range(0, len(latList['modes'])):
    xmlObj = etree.parse(xml_path + '/sched' + str(modeInd) + '.xml')
    xmlList.append(xmlObj)
    if not reconfig:
        break  # just one schedule in all2all
#List of worst-case packet latencies (for each mode if there is NoC reconfig):
LPWC_list = []
for xmlSched in xmlList:
    root = xmlSched.getroot()
    LPWC_list.append(str(int(root.attrib['length'])+6))

Example #18
import logging
import os
import time

from timer import timecallback
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import load_model
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
# Hyperparams, Paths, NBatchLogger, model and CNN_test_data_loader come from this project's own modules


# logger configuration
FORMAT = "[%(filename)s: %(lineno)3s] %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)

name = 'CNN-Autoencoder-{}'.format(int(time.time()))

H = Hyperparams()
P = Paths()
nbl = NBatchLogger(display=1)
m = model()

tensorboard = TensorBoard(log_dir=os.path.join(P.log_dir, name))

adam = tf.keras.optimizers.Adam(lr=H.learning_rate)
rmsprop = tf.keras.optimizers.RMSprop(lr=H.learning_rate)
if os.path.exists(P.cnn_encoder):
    cnn_encoder = load_model(P.cnn_encoder)

else:
    cnn_train_data_loader = CNN_test_data_loader(H.train_batch_size)
    logger.info("cnn_train_data_loader created")
    cnn_encoder, cnn_autoencoder = m.get_model_cnn()
    logger.info("cnn_autoencoder created")
Example #19
class Actions():
    def __init__(self, conf):

        self.options = []
        #ATTENTION: if order changes, edit "run_action()" and ctrl_actions.py
        # 0 name, 1 message, 2 field data, 3 unique ID
        self.options.append([
            _('wait'),
            _('Enter seconds to wait in the field below.'), 1, 'ACT1'
        ])
        self.options.append([
            _('command'),
            _('Enter a Linux command and arguments in the field below.'), 1,
            'ACT2'
        ])
        self.options.append([_('reset'), 0, 0, 'ACT3'])
        self.options.append([_('shutdown'), 0, 0, 'ACT4'])
        self.options.append([_('stop NMEA multiplexer'), 0, 0, 'ACT5'])
        self.options.append([_('reset NMEA multiplexer'), 0, 0, 'ACT6'])
        self.options.append([_('stop Signal K server'), 0, 0, 'ACT7'])
        self.options.append([_('reset Signal K server'), 0, 0, 'ACT8'])
        self.options.append([
            _('stop WiFi access point'),
            _('Be careful, if you are connected by remote you may not be able to reconnect again.'
              ), 0, 'ACT9'
        ])
        self.options.append([
            _('start WiFi access point'),
            _('Be sure you have filled in all fields in "WiFi AP" tab and enabled WiFi access point.'
              ), 0, 'ACT10'
        ])
        self.options.append([_('stop SDR-AIS'), 0, 0, 'ACT11'])
        self.options.append([
            _('reset SDR-AIS'),
            _('Be sure you have filled in Gain and Correction fields in "SDR-AIS" tab and enabled AIS NMEA generation.'
              ), 0, 'ACT12'
        ])
        self.options.append([
            _('publish Twitter'),
            _('Be sure you have filled in all fields in "Accounts" tab, selected data to publish and enabled Twitter checkbox.\n\nEnter text to publish in the field below (optional).'
              ), 1, 'ACT13'
        ])
        self.options.append([
            _('send e-mail'),
            _('Be sure you have filled in all fields in "Accounts" tab, and enabled Gmail checkbox.\n\nEnter the subject in the field below.'
              ), 1, 'ACT14'
        ])
        self.options.append([_('play sound'), 'OpenFileDialog', 1, 'ACT15'])
        self.options.append([_('stop all sounds'), 0, 0, 'ACT16'])
        self.options.append([
            _('show message'),
            _('Enter the message in the field below.'), 1, 'ACT17'
        ])
        self.options.append([_('close all messages'), 0, 0, 'ACT18'])
        self.options.append([_('start all actions'), 0, 0, 'ACT19'])
        self.options.append([
            _('stop all actions'),
            _('This action will stop all the triggers except the trigger which has an action "start all actions" defined.'
              ), 0, 'ACT20'
        ])

        #Outputs
        x = conf.get('OUTPUTS', 'outputs')
        if x: self.out_list = eval(x)
        else: self.out_list = []
        for i in self.out_list:
            try:
                if i[0] == '1':
                    self.options.append([
                        i[1] + _(': High'),
                        _('ATTENTION! if you set this output to "High" and there is not a resistor or a circuit connected to the selected GPIO pin, YOU CAN DAMAGE YOUR BOARD.'
                          ), 0, 'H' + i[4]
                    ])
                    self.options.append([i[1] + _(': Low'), 0, 0, 'L' + i[4]])
            except Exception as e:
                print(str(e))

        self.time_units = [
            _('no repeat'),
            _('seconds'),
            _('minutes'),
            _('hours'),
            _('days')
        ]

        paths = Paths()
        self.home = paths.home
        self.currentpath = paths.currentpath
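
The outputs string read from the config above is parsed with eval. Assuming the stored value is a plain Python list literal, ast.literal_eval is a safer way to parse it; a minimal sketch, not this project's actual code:

    import ast

    x = conf.get('OUTPUTS', 'outputs')
    self.out_list = ast.literal_eval(x) if x else []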
Example #20
    def update_path_to_local_copy(self, local_directory):

        self._conf_path = os.path.join(local_directory, Paths().experiment_local_fixturename)
Example #21
class AudioMain:
    #some parameters:
    OH_MULT_0 = 4
    INOUT_BUF_SIZE = 128
    MAX_NOC_BANDWIDTH = 4
    Fs = 52083
    BUF_AMOUNT = 3
    #################### FX ######################
    #Amount of available cores in the platform
    CORE_AMOUNT = 4
    #Core order for minimal NoC channel usage
    coreOrder = [{
        'id': 0,
        'pos': '(0,0)'
    }, {
        'id': 1,
        'pos': '(1,0)'
    }, {
        'id': 2,
        'pos': '(0,1)'
    }, {
        'id': 3,
        'pos': '(1,1)'
    }]
    #List of available effects:
    #   -S: samples processed per execution
    #   -ORF: overhead reducing factor between minimum buffer size and S
    #   -util: utilization ratio: processing time per sample relative to sampling period
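    #   e.g. a hypothetical entry: {'name': 'delay', 'S': 8, 'ORF': 2, 'util': 0.3}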
    FX = []
    #loaded JSON object
    audioApp = {}
    #List describing the used effects (used to create header)
    ModesList = []
    chan_id = 0
    LatencyList = []
    latenciesDict = {}
    #################### NoC ######################
    #Platform NoC description
    NoC = {
        'width': '2',
        'height': '2',
        'topoType': 'bitorus',
        'routerDepth': '3',
        'linkDepth': '0',
        'routerType': 'sync',
        'configurations': []
    }
    #List of NoC channels needed
    NoCChannels = []
    #Describer of the scheduler XML
    NoCConfs = []
    #setup project name and paths
    projectname = sys.argv[3]
    projectname = os.path.splitext(projectname)[0]
    print("PROJECT NAME: " + projectname)
    p = Paths(projectname)
    #NoC Reconfiguration
    NoCReconfig = False

    def __init__(self):
        #Read Audio APP JSON
        print("READING AUDIO APP FILE: " + sys.argv[2])
        with open(sys.argv[2]) as audioApp_json:
            self.audioApp = json.load(audioApp_json)
        print('READING FX LIST FILE: ' + sys.argv[1])
        with open(sys.argv[1]) as FX_json:
            jsonFX = json.load(FX_json)
        self.FX = jsonFX['FX_LIST']
        #check if reconfiguration is enabled
        if sys.argv[4] == '1':
            print('NoC RECONFIGURATION ENABLED')
            self.NoCReconfig = True
        else:
            print('NoC RECONFIGURATION DISABLED')

    #function used by addFX to add 1 FX
    def addThisFX(self, thisFX, fx_id, core, FXList, CORE_UTIL, thisChain,
                  prevChain):
        for fxi in range(0, len(self.FX)):
            #get fx_type and S
            if self.FX[fxi]['name'] == thisFX:
                fx_type = fxi
                S = self.FX[fxi]['S']
                ORF = self.FX[fxi]['ORF']
                #for 0: increase ORF
                if fx_id == 0:
                    ORF = ORF * self.OH_MULT_0
                util = self.FX[fxi]['util']
                break
            if fxi == (len(self.FX) - 1):
                print('ERROR: EFFECT ' \
                      + thisFX + ' DOES NOT EXIST')
                return (1, fx_id, core, FXList, CORE_UTIL)
        same_core_in = False
        #if chain starts or finishes, must update core
        chainStarts = False
        chainEnds = False
        if thisChain != prevChain:
            core += 1
            if thisChain > 0:
                chainStarts = True
            if thisChain == 0:
                chainEnds = True
            #print('chain start or finish')
        else:
            #see if there is enough utilization space on this core for this FX
            if (1 - CORE_UTIL[core]) < util:  #if not enough space
                core += 1
            else:
                #receives from same core: only if it is not 1st and its not empty
                if (fx_id != 0) and (CORE_UTIL[core] > 0):
                    same_core_in = True
        if core >= self.CORE_AMOUNT:
            print('ERROR: TOO MANY EFFECTS, DONT FIT IN ' \
                  + str(self.CORE_AMOUNT) + ' CORES')
            return (1, fx_id, core, FXList, CORE_UTIL)
        #update utilization
        CORE_UTIL[core] += util
        bufsize = S * ORF
        #limit: 32
        if bufsize > 32:
            bufsize = 32
        #store object
        fxObj = {
            'fx_id': fx_id,
            'core': self.coreOrder[core]['id'],
            'fx_type': fx_type,
            'S': S,
            'xb_size': bufsize,
            'yb_size': bufsize,
            'chain_id': thisChain
        }
        #check if it is same core in
        if same_core_in:
            FXList[len(FXList) - 1]['out_same'] = True
            fxObj['in_same'] = True
            #if out same, increase overhead reducer
            fxObj['xb_size'] = fxObj['xb_size'] * self.OH_MULT_0
        #check if there is fork or join
        if (len(FXList) > 0):  #if it is not first FX of this mode
            if (thisChain > 0) and (prevChain == 0):
                FXList[len(FXList) - 1]['is_fork'] = True
            if (thisChain == 0) and (prevChain > 0):
                fxObj['is_join'] = True
        #if it is first or last of chain:
        if chainStarts:
            fxObj['chain_start'] = True
            #check if last one was not fork: if not, it was a chain end
            if 'is_fork' not in FXList[len(FXList) - 1]:
                FXList[len(FXList) - 1]['chain_end'] = True
        if chainEnds:
            FXList[len(FXList) - 1]['chain_end'] = True
        FXList.append(fxObj)
        #print(thisFX + ', chain=' + str(thisChain) + ', core ' + str(core) + ': fx_util=' + str(util) + ', core_util='+ str(CORE_UTIL[core]))
        fx_id += 1
        return (0, fx_id, core, FXList, CORE_UTIL)

    #function to add chains from audioApp into the FX List
    def addFX(self):
        for mode in self.audioApp['modes']:
            FXList = []
            fx_id = 0
            core = 0
            #Current utilization on each core (between 0 and 1)
            CORE_UTIL = [0.5, 0, 0, 0]
            #to check which chain (0 = no chain, 1+ = chain number)
            thisChain = 0
            prevChain = 0
            #if mode starts with chain, create 1st FX with id 0
            if (type(mode[0]) == dict) and ('chains' in mode[0]):
                #add initial effect: dry
                FXList.append({
                    'fx_id': fx_id,
                    'core': 0,
                    'fx_type': 0,
                    'S': 1,
                    'xb_size': self.OH_MULT_0,
                    'yb_size': self.OH_MULT_0,
                    'chain_id': 0
                })
                fx_id += 1
                CORE_UTIL[0] = 1
            else:
                #if util of 1st > 0.5, create dry effect as first:
                for fxi in range(0, len(self.FX)):
                    #get fx_type and S
                    if (self.FX[fxi]['name']
                            == mode[0]) and (self.FX[fxi]['util'] > 0.5):
                        #add initial effect: dry
                        FXList.append({
                            'fx_id': fx_id,
                            'core': 0,
                            'fx_type': 0,
                            'S': 1,
                            'xb_size': self.OH_MULT_0,
                            'yb_size': self.OH_MULT_0,
                            'chain_id': 0
                        })
                        fx_id += 1
                        CORE_UTIL[0] = 1
            #loop through items in mode
            for item in mode:
                if type(item) == dict:
                    if 'chains' not in item:
                        print('ERROR: wrong name, ' \
                              + 'only effect names or the "chains" keyword are accepted')
                        return 1
                    else:
                        #iterate chains
                        for chain in item['chains']:
                            thisChain += 1
                            #iterate FX
                            for fxname in chain:
                                result, fx_id, core, FXList, CORE_UTIL = \
                                    self.addThisFX(fxname, fx_id, core, \
                                                   FXList, CORE_UTIL, thisChain, prevChain)
                                prevChain = thisChain
                                if result == 1:
                                    return 1
                else:
                    thisChain = 0
                    result, fx_id, core, FXList, CORE_UTIL = \
                        self.addThisFX(item, fx_id, core, \
                                       FXList, CORE_UTIL, thisChain, prevChain)
                    prevChain = thisChain
                    if result == 1:
                        return 1

            #add final effect: dry
            fxObj = {
                'fx_id': len(FXList),
                'core': 0,
                'fx_type': 0,
                'S': 1,
                'xb_size': 1,
                'yb_size': 1,
                'chain_id': 0
            }
            #check if last one is a join
            if prevChain > 0:
                fxObj['is_join'] = True
                FXList[len(FXList) - 1]['chain_end'] = True
            FXList.append(fxObj)
            #add mode to modes list
            self.ModesList.append(FXList)
        return 0

    #function to check that the amount of cores in each chain is balanced
    def checkChainBalance(self):
        for mode in self.ModesList:
            chain_amount = 0
            for fx in mode:
                if fx['chain_id'] > chain_amount:
                    chain_amount = fx['chain_id']
            cores_per_chain = [0] * chain_amount
            for i in range(0, len(cores_per_chain)):
                cores_on_chain = []
                for fx in mode:
                    if (fx['chain_id'] - 1) == i:
                        if fx['core'] not in cores_on_chain:
                            cores_on_chain.append(fx['core'])
                cores_per_chain[i] = len(cores_on_chain)
            #check that all chains have same amount of cores
            if len(cores_per_chain) > 0:
                chain_cores_base = cores_per_chain[0]
                for c in cores_per_chain:
                    if c != chain_cores_base:
                        print(
                            'ERROR: chains have different amount of cores, must have the same'
                        )
                        return 1
        return 0

    #function to create connections
    def connectFX(self):
        for FXList in self.ModesList:
            for i in range(0, len(FXList)):
                if i == 0:  #first
                    in_con = 0
                else:
                    if 'in_same' in FXList[i]:
                        in_con = 3  #same core
                        FXList[i].pop('in_same')
                    else:
                        in_con = 2  #NoC
                if i == (len(FXList) - 1):  #last
                    out_con = 1
                else:
                    if 'out_same' in FXList[i]:
                        out_con = 3  #same core
                        FXList[i].pop('out_same')
                    else:
                        out_con = 2  #NoC

                fxAdd = {'in_con': in_con, 'out_con': out_con}
                #output connections IDs
                if i < (len(FXList) - 1):  #not last
                    fxAdd['to_id'] = []
                    if 'is_fork' in FXList[i]:
                        #amount of chains: as many as 'chain_start' amount
                        for fx in range(0, len(FXList)):
                            if 'chain_start' in FXList[fx]:
                                fxAdd['to_id'].append(self.chan_id)
                                self.chan_id += 1
                    else:
                        fxAdd['to_id'].append(self.chan_id)
                        self.chan_id += 1
                #add fxAdd to dict
                FXList[i].update(fxAdd)
            #loop again for input connections IDs
            for i in range(0, len(FXList)):
                if i > 0:  #not first
                    fxAdd = {'from_id': []}
                    if 'is_join' in FXList[i]:
                        #check channels of effects that are 'chain_end'
                        for fx in range(0, len(FXList)):
                            if 'chain_end' in FXList[fx]:
                                fxAdd['from_id'].append(FXList[fx]['to_id'][0])
                    else:  #not a join
                        #if its a chain start:
                        if 'chain_start' in FXList[i]:
                            #check channels of effects that are fork
                            for fx in range(0, len(FXList)):
                                if 'is_fork' in FXList[fx]:
                                    #channel index is given by chain_id
                                    fxAdd['from_id'].append \
                                        (FXList[fx]['to_id'][(FXList[i]['chain_id']-1)])
                                    break  #there is only one fork
                        else:
                            #receive from previous effect
                            fxAdd['from_id'].append(FXList[i - 1]['to_id'][0])
                    #add fxAdd to dict
                    FXList[i].update(fxAdd)

    #function to match buffer sizes on a join effect
    def setJoinSizes(self, jind, FXList):
        #input size: max(<chain_end yb_sizes>, <input>)
        size = FXList[jind]['xb_size']
        #loop 1st time: find maximum
        for channel_in in FXList[jind]['from_id']:
            for fx in range(0, len(FXList) - 1):
                if FXList[fx]['to_id'][0] == channel_in:
                    size = max(size, FXList[fx]['yb_size'])
        FXList[jind]['xb_size'] = size
        #print('join ' + str(jind) + ': max size: ' + str(size))
        #loop 2nd time: set value
        for channel_in in FXList[jind]['from_id']:
            for fx in range(0, len(FXList) - 1):
                if FXList[fx]['to_id'][0] == channel_in:
                    FXList[fx]['yb_size'] = size
        return FXList

    #function to match buffer sizes on a fork effect
    def setForkSizes(self, find, FXList):
        #output size: max(<chain_start xb_sizes>, <input>)
        size = FXList[find]['yb_size']
        #loop 1st time: find maximum
        for channel_out in FXList[find]['to_id']:
            for fx in range(1, len(FXList)):
                if FXList[fx]['from_id'][0] == channel_out:
                    size = max(size, FXList[fx]['xb_size'])
        FXList[find]['yb_size'] = size
        #print('fork ' + str(find) + ': max size: ' + str(size))
        #loop 2nd time: set value
        for channel_out in FXList[find]['to_id']:
            for fx in range(1, len(FXList)):
                if FXList[fx]['from_id'][0] == channel_out:
                    FXList[fx]['xb_size'] = size
        return FXList

    #function to change buffer sizes
    def setBufSizes(self):
        for FXList in self.ModesList:
            for i in range(0, len(FXList)):
                #xb_size: only for non-first
                if i != 0:
                    if FXList[i]['in_con'] == 3:  #same core in
                        #all must have same size: XeY
                        size = max(FXList[i]['xb_size'], FXList[i]['yb_size'], \
                                   FXList[i-1]['xb_size'], FXList[i-1]['yb_size'])
                        FXList[i]['xb_size'] = size
                        FXList[i]['yb_size'] = size
                        FXList[i - 1]['xb_size'] = size
                        FXList[i - 1]['yb_size'] = size
                    else:  #NoC in
                        #is it a join?
                        if 'is_join' in FXList[i]:
                            FXList = self.setJoinSizes(i, FXList)
                        #is it a 'chain_start'?
                        elif 'chain_start' in FXList[i]:
                            chan_id = FXList[i]['from_id'][0]
                            #find the fork
                            for fx in range(0, len(FXList)):
                                if 'is_fork' in FXList[fx]:
                                    #iterate through fork channels
                                    for channel in FXList[fx]['to_id']:
                                        if channel == chan_id:  #Found the right fork
                                            FXList = self.setForkSizes(
                                                fx, FXList)
                        else:  #No join or chain_start
                            chan_id = FXList[i]['from_id'][0]
                            #look for source FX
                            for fx in range(0, len(FXList) - 1):
                                if ('is_fork' not in FXList[fx]) and \
                                   ('chain_end' not in FXList[fx]) and \
                                   (FXList[fx]['out_con'] == 2) and \
                                   (FXList[fx]['to_id'][0] == chan_id):
                                    size = max(FXList[i]['xb_size'],
                                               FXList[fx]['yb_size'])
                                    FXList[i]['xb_size'] = size
                                    FXList[fx]['yb_size'] = size
                                    break
                #yb_size: only for non-last
                if i != (len(FXList) - 1):
                    if FXList[i]['out_con'] == 3:  #same core out
                        #all must have same size: XeY
                        size = max(FXList[i]['xb_size'], FXList[i]['yb_size'], \
                                   FXList[i+1]['xb_size'], FXList[i+1]['yb_size'])
                        FXList[i]['xb_size'] = size
                        FXList[i]['yb_size'] = size
                        FXList[i + 1]['xb_size'] = size
                        FXList[i + 1]['yb_size'] = size
                    else:  #NoC out
                        #is it a fork?
                        if 'is_fork' in FXList[i]:
                            FXList = self.setForkSizes(i, FXList)
                        #is it a 'chain_end'?
                        elif 'chain_end' in FXList[i]:
                            chan_id = FXList[i]['to_id'][0]
                            #find the join
                            for fx in range(0, len(FXList)):
                                if 'is_join' in FXList[fx]:
                                    #iterate through join channels
                                    for channel in FXList[fx]['from_id']:
                                        if channel == chan_id:  #Found the right join
                                            FXList = self.setJoinSizes(
                                                fx, FXList)
                        else:  #No fork or chain_end
                            chan_id = FXList[i]['to_id'][0]
                            #look for destination FX
                            for fx in range(1, len(FXList)):
                                if ('is_join' not in FXList[fx]) and \
                                   ('chain_start' not in FXList[fx]) and \
                                   (FXList[fx]['in_con'] == 3) and \
                                   (FXList[fx]['from_id'][0] == chan_id):
                                    size = max(FXList[i]['yb_size'],
                                               FXList[fx]['xb_size'])
                                    FXList[i]['yb_size'] = size
                                    FXList[fx]['xb_size'] = size
                                    break

    #function to set first and last FX to XeY
    def makeEdgesXeY(self):
        for FXList in self.ModesList:
            maxBuf = 0
            #first find max from x/y in/out
            for i in range(0, len(FXList)):
                if (i == 0) or (i == (len(FXList) - 1)):
                    maxTemp = max(FXList[i]['xb_size'], FXList[i]['yb_size'])
                    maxBuf = max(maxBuf, maxTemp)
            #then set values
            for i in range(0, len(FXList)):
                if (i == 0) or (i == (len(FXList) - 1)):
                    FXList[i]['xb_size'] = maxBuf
                    FXList[i]['yb_size'] = maxBuf

    #function to calculate the latency in RUNS OF CORE 0 (not in samples from input to output)
    def calcLatency(self):
        #JSON file containing latency values
        #self.latenciesDict = {}
        #add Fs and IO Buffer Latency
        self.latenciesDict['Fs'] = self.Fs
        self.latenciesDict['L_IO'] = self.INOUT_BUF_SIZE
        #Prepare to add FX Latency of each mode
        self.latenciesDict['modes'] = []
        for FXList in self.ModesList:
            coresDone = []
            Latency = 0
            #List to store channel buffer sizes
            buf_sizes_list = []
            #first, latency for the 1st sample to arrive
            for fx in FXList:
                #check that latency of this core has not yet been considered
                #(because just one FX needs to be considered per core)
                #Also, consider just one chain: only chains 0 and 1 (if it exists)
                if (fx['core'] not in coresDone) and ((fx['chain_id'] == 0) or
                                                      (fx['chain_id'] == 1)):
                    coresDone.append(fx['core'])
                    Latency += fx['yb_size']
                    buf_sizes_list.append(fx['yb_size'])
            #then, add xb_size of LAST
            last_xb_size = FXList[len(FXList) - 1]['xb_size']  #YES
            #Latency += last_xb_size #really?
            #object to add to latencies JSON
            latObj = {
                'FX_L': Latency,
                'LAST_FX_SIZE': last_xb_size,
                'BUF_S_LIST': buf_sizes_list
            }
            self.latenciesDict['modes'].append(latObj)
            #finally, divide by xb_size of LAST and ceil
            MasterLatency = math.ceil(Latency / last_xb_size)
            #add to latencies list
            self.LatencyList.append(MasterLatency)
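        #Worked example with hypothetical numbers: three cores contributing
        #yb_sizes of 4, 8 and 8 give Latency = 20; with a last xb_size of 8,
        #MasterLatency = ceil(20 / 8) = 3 runs of core 0.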

    #function to extract NoC channels info
    def extNoCChannels(self):
        #first, create list with channel IDs
        chanIDs = []
        for FXList in self.ModesList:
            for fx in FXList:
                if ('from_id' in fx) and (fx['in_con']
                                          == 2):  #not first and NoC in
                    for chan in fx['from_id']:
                        if chan not in chanIDs:
                            chanIDs.append(chan)
                if ('to_id' in fx) and (fx['out_con']
                                        == 2):  #not last and NoC out
                    for chan in fx['to_id']:
                        if chan not in chanIDs:
                            chanIDs.append(chan)
        #then, extract info
        for ci in chanIDs:
            chanObj = {'chan_id': ci, 'buf_amount': self.BUF_AMOUNT}
            for mode_i in range(0, len(self.ModesList)):
                for fx in self.ModesList[mode_i]:
                    if 'from_id' in fx:  #not first
                        for chan in fx['from_id']:
                            if chan == ci:
                                chanObj['to_core'] = fx['core']
                                chanObj['mode'] = mode_i
                    if 'to_id' in fx:  #not last
                        for chan in fx['to_id']:
                            if chan == ci:
                                chanObj['from_core'] = fx['core']
                                chanObj['mode'] = mode_i
            self.NoCChannels.append(chanObj)

    #function to create header file
    def createHeader(self):
        #find info needed for audioinit.h
        audioCoresList = []
        FXAmountList = []
        allCores = 0
        maxFX = 0
        modes = 0
        for FXList in self.ModesList:
            coresUsed = []
            FXidsUsed = []
            modes += 1
            for fx in FXList:
                if fx['core'] not in coresUsed:
                    coresUsed.append(fx['core'])
                if fx['fx_id'] not in FXidsUsed:
                    FXidsUsed.append(fx['fx_id'])
            audioCoresList.append(len(coresUsed))
            FXAmountList.append(len(FXidsUsed))
            if len(coresUsed) > allCores:
                allCores = len(coresUsed)
            if len(FXidsUsed) > maxFX:
                maxFX = len(FXidsUsed)

        maxFXPerCore = [0] * allCores
        for FXList in self.ModesList:
            FXPerCore = [0] * allCores
            for fx in FXList:
                FXPerCore[fx['core']] += 1
            for i in range(0, len(FXPerCore)):
                maxFXPerCore[i] = max(maxFXPerCore[i], FXPerCore[i])

        #Header file
        FX_H = '''
        #ifndef _AUDIOINIT_H_
        #define _AUDIOINIT_H_

        '''
        #if NoC reconfiguration is enabled:
        if self.NoCReconfig:
            FX_H += '''
        //NoC Reconfiguration enabled
        #define NOC_RECONFIG'''
        FX_H += '''
        //input/output buffer sizes
        const unsigned int BUFFER_SIZE = ''' + str(self.INOUT_BUF_SIZE) + ''';
        //amount of configuration modes
        const int MODES = ''' + str(modes) + ''';
        //how many cores take part in the audio system (from all modes)
        const int AUDIO_CORES = ''' + str(allCores) + ''';
        //how many effects are on each mode in total
        const int FX_AMOUNT[MODES] = {'''
        for FXAmount in FXAmountList:
            FX_H += str(FXAmount) + ', '
        FX_H += '''};
        //maximum amount of effects per core
        const int MAX_FX_PER_CORE[AUDIO_CORES] = {'''
        for fxPC in maxFXPerCore:
            FX_H += str(fxPC) + ', '
        FX_H += '''};
        //maximum FX_AMOUNT
        const int MAX_FX = ''' + str(maxFX) + ''';
        // FX_ID | CORE | FX_TYPE | XB_SIZE | YB_SIZE | S | IN_TYPE | OUT_TYPE //'''
        for mode in range(0, modes):
            FX_H += '''
        const int FX_SCHED_''' + str(mode) + '''[''' \
            + str(FXAmountList[mode]) + '''][8] = {'''
            #fill in matrix
            for fx in self.ModesList[mode]:
                FX_H += '''
            { ''' + str(fx['fx_id']) + ', ' + str(fx['core']) + ', ' \
                + str(fx['fx_type']) + ', ' + str(fx['xb_size']) + ', ' \
                + str(fx['yb_size']) + ', ' + str(fx['S']) + ', ' \
                + str(fx['in_con']) + ', ' + str(fx['out_con']) + ' },'
            FX_H += '''
        };'''
        FX_H += '''
        //pointer to schedules
        const int *FX_SCHED_P[MODES] = {'''
        for mode in range(0, modes):
            FX_H += '''
            (const int *)FX_SCHED_''' + str(mode) + ','
        FX_H += '''
        };
        //amount of NoC channels (NoC or same core) on all modes
        const int CHAN_AMOUNT = ''' + str(self.chan_id) + ''';
        //amount of buffers on each NoC channel ID
        const int CHAN_BUF_AMOUNT[CHAN_AMOUNT] = { '''
        for chan in range(0, self.chan_id):
            chanPrinted = False
            for c in self.NoCChannels:
                if chan == c['chan_id']:
                    FX_H += str(c['buf_amount']) + ', '
                    chanPrinted = True
                    break
            if not (chanPrinted):
                FX_H += '1, '
        FX_H += '''};
        // column: FX_ID source   ,   row: CHAN_ID dest'''
        for mode in range(0, modes):
            FX_H += '''
        const int SEND_ARRAY_''' + str(mode) + '''[''' \
            + str(FXAmountList[mode]) + '''][CHAN_AMOUNT] = {'''
            #fill in matrix
            for fx in self.ModesList[mode]:
                FX_H += '''
            {'''
                for chan in range(0, self.chan_id):
                    thisOutCon = False
                    if 'to_id' in fx:
                        for chanTo in fx['to_id']:
                            if chanTo == chan:
                                FX_H += '1, '
                                thisOutCon = True
                    if not (thisOutCon):
                        FX_H += '0, '
                FX_H += '''},'''
            FX_H += '''
        };'''
        FX_H += '''
        //pointer to send arrays
        const int *SEND_ARRAY_P[MODES] = {'''
        for mode in range(0, modes):
            FX_H += '''
            (const int *)SEND_ARRAY_''' + str(mode) + ','
        FX_H += '''
        };
        // column: FX_ID dest   ,   row: CHAN_ID source'''
        for mode in range(0, modes):
            FX_H += '''
        const int RECV_ARRAY_''' + str(mode) + '''[''' \
            + str(FXAmountList[mode]) + '''][CHAN_AMOUNT] = {'''
            #fill in matrix
            for fx in self.ModesList[mode]:
                FX_H += '''
            {'''
                for chan in range(0, self.chan_id):
                    thisInCon = False
                    if 'from_id' in fx:
                        for chanFrom in fx['from_id']:
                            if chanFrom == chan:
                                FX_H += '1, '
                                thisInCon = True
                    if not (thisInCon):
                        FX_H += '0, '
                FX_H += '''},'''
            FX_H += '''
        };'''
        FX_H += '''
        //pointer to receive arrays
        const int *RECV_ARRAY_P[MODES] = {'''
        for mode in range(0, modes):
            FX_H += '''
            (const int *)RECV_ARRAY_''' + str(mode) + ','
        FX_H += '''
        };

        #endif /* _AUDIOINIT_H_ */'''
        #write file
        with open(self.p.AudioInitFile, "w") as f:
            f.write(FX_H)

    #function to fill in the NoCConfs array
    def confNoC(self):
        minBufSizes = []  # min buffer size for each mode
        for FXList in self.ModesList:
            #first, find out minimum buffer size:
            minBufSize = self.MAX_NOC_BANDWIDTH  #(start from maximum)
            for fx in FXList:
                if fx['xb_size'] < minBufSize:
                    minBufSize = fx['xb_size']
            minBufSizes.append(minBufSize)
        #no need to check for repeated channels (between same cores):
        #they have different IDs on the object
        #(i.e. are different channels)
        for mode in range(0, len(minBufSizes)):
            NoCConf = {
                'comType': 'custom',
                #phits: words per packet.
                #Stereo audio: 2 shorts = 1 word
                #including header and flag: phits=3
                #(minimum data is 1 word for ack channels)
                'phits': '3',
                'channels': []
            }
            for chan in self.NoCChannels:
                if chan['mode'] == mode:
                    for core in self.coreOrder:
                        if chan['from_core'] == core['id']:
                            from_p = core['pos']
                        if chan['to_core'] == core['id']:
                            to_p = core['pos']
                    chanObj = {
                        'from': from_p,
                        'to': to_p,
                        'bandwidth': str(minBufSizes[mode])
                        #packets per TDM period:
                        #each packet is a sample in this case.
                    }
                    #create reverted channel (for ACK)
                    chanObjRev = {
                        'from': to_p,
                        'to': from_p,
                        'bandwidth': '1'  #only 1 needed for ack
                    }
                    NoCConf['channels'].append(chanObj)
                    NoCConf['channels'].append(chanObjRev)
            self.NoCConfs.append(NoCConf)
        #add bandwidth values to latencies dict
        modeInd = 0
        for mode in self.latenciesDict['modes']:
            mode['BANDWIDTH'] = minBufSizes[modeInd]
            modeInd += 1
        #Write latencies object
        with open(sys.argv[5], 'w') as latJSON:
            json.dump(self.latenciesDict, latJSON)
        '''
        for NoCConf in self.NoCConfs:
            print('THIS MODE')
            for chan in NoCConf['channels']:
                print(chan)
        '''

    # Create the root element and new document tree
    def genNoCSchedule(self):
        nocsched = etree.Element(
            'nocsched',
            version='0.1',
            nsmap={'xi': 'http://www.w3.org/2001/XInclude'})
        doc = etree.ElementTree(nocsched)
        # Description
        desc = etree.SubElement(nocsched, 'description')
        desc.text = 'NoC TDM scheduling'
        #platform & topology
        platform = etree.SubElement(nocsched,
                                    'platform',
                                    width=self.NoC['width'],
                                    height=self.NoC['height'])
        topology = etree.SubElement(platform,
                                    'topology',
                                    topoType=self.NoC['topoType'],
                                    routerDepth=self.NoC['routerDepth'],
                                    linkDepth=self.NoC['linkDepth'],
                                    routerType=self.NoC['routerType'])
        #application & configurations
        configurations = etree.SubElement(
            etree.SubElement(nocsched, 'application'), 'configurations')
        for commun in self.NoCConfs:
            #iterate through keys (except "channels")
            communDict = {}
            communKeys = commun.keys()
            for key in communKeys:
                if (key != 'channels'):
                    communDict[key] = commun[key]
            communication = etree.SubElement(configurations, 'communication',
                                             communDict)
            #add channels, if there are
            if 'channels' in communKeys:
                for channel in commun['channels']:
                    channel = etree.SubElement(communication, 'channel',
                                               channel)

        # Save to XML file
        print("GENERATING NoC TDM SCHEDULING XML FILE: " + sys.argv[3])
        doc.write(sys.argv[3], xml_declaration=True, encoding='utf-8')
Example #22
def spawnContainers(c, fullLowerDir):
    for ip in genIPlist(c.cntCommon):
        container = Container(ip, fullLowerDir, c)
        printInfo("***")
        print(INFO("Trying to start container " + container.cName))
        container.mountOverlayFS()
        container.spawn()
        container.checkStatus(full=False)
        print(INFO("Adding fingerprint to ssh known_hosts "))
        container.checkSSH()

if __name__ == "__main__":
    args = parseArgs()
    c = parseConfig(configFile)
    image = Image(c.image, c.path)
    paths = Paths(c.path)
    if args.o == "status":
        Network(c.bridge, c.ipt).checkNetwork()
        paths.checkStatus()
        image.loop.checkStatus()
        checkContainers(c)
    elif args.o == "getimage":
        paths.createWorkingDirs()
        image.checkPre()
        image.fetch()
    elif args.o == "cleanup":
        paths.rmWorkingDirs()
    elif args.o == "spawn":
        c.cntVariables.prepareSSHPublicKey()
        c.cntVariables.generateVariablesFile(c.cntCommon.variablesFile)
        image.loop.mountImagePartition()
Example #23
"""
Script to run categorizer on a given doc
"""
import os
import argparse
import tensorflow as tf
from nltk.corpus import stopwords

from word_embedder_gensim import WordVectorizer
from rnn_w2v import RNN_Model
from preprocess import preprocess_doc
from batch_generator import embedding_lookup
from paths import Paths

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
paths = Paths()
classes = {
    1: "Company",
    2: "Educational Institution",
    3: "Artist",
    4: "Athlete",
    5: "Office Holder",
    6: "Mean Of Transportation",
    7: "Building",
    8: "Natural Place",
    9: "Village",
    10: "Animal",
    11: "Plant",
    12: "Album",
    13: "Film",
    14: "Written Work"
}
Example #24
    def prepend_paths(self, *paths):
        # Place the new paths ahead of the existing ones
        new = Paths(paths)
        new.extend(self.paths)
        self.paths = new
Example #25
    def __init__(self, root="."):
        self.root = os.path.abspath(root)
        self.paths = Paths(root=self.root)
        self.extensions = Extensions()
        self.aliases = {}
Example #26
from brain_path import BrainPath
from paths import Paths
from kpaths import KPaths
from copy import deepcopy
from heapq import heappush
from heapq import heappop

p = BrainPath(5)

k = 10
ps = Paths(k, 20)

print(p)

for i in range(20):
    p = p.add(i, 1)
    print(p)
    print(ps.add(p))
    print(ps.add(p))

p = BrainPath(1)
for i in range(20):
    p = p.add(i, 0.1)
    print(p)
    print(ps.add(p))
    print(ps.add(p))
    print(len(ps))

print(ps)
print(len(ps))
Example #27
def train_resnet(
        new_model=False,
        batch_size=32,
        epochs=20,
        validation=True):
    """
    """
    start_time = datetime.now()
    paths = Paths()
    model_name = "ResNet50"
    hdf5_path = os.path.join("..", "data", "76_79_80_noValidate.hdf5")
    if new_model:
        model = ResNet50()

    else:
        model = load_model_from_json()

    adam_opt = Adam(lr=0.0001, decay=1e-6)
    model.compile(
            optimizer=adam_opt,
            loss="categorical_crossentropy",
            metrics=["accuracy"]
            )

    hdf5_file = tables.open_file(hdf5_path, mode='r')
    n_train = hdf5_file.root.train_img.shape[0]
    n_test = hdf5_file.root.test_img.shape[0]

    steps_per_epoch = int(ceil(n_train / batch_size))
    validation_steps = int(ceil(n_test / batch_size))

    timestamp = datetime.now().strftime(r"%Y%m%d_%I%M%S%p")
    tb_log_path = os.path.join(
        paths.logs, '{}_logs_{}'.format(model_name, timestamp))
    os.makedirs(tb_log_path, exist_ok=True)
    os.makedirs(paths.models, exist_ok=True)

    epoch_loss_path = os.path.join(
        paths.logs,
        "{}_epoch_loss_{}.log".format(model_name, timestamp)
    )
    batch_loss_path = os.path.join(
        paths.logs,
        "{}_batch_loss_{}.log".format(model_name, timestamp)
    )
    model_hdf5_path = os.path.join(
        paths.models,
        "{}_{}.hdf5".format(model_name, timestamp)
    )
    history = LossHistory(
        epoch_loss_path,
        batch_loss_path,
        model_hdf5_path
    )
    tensorboard = TensorBoard(
        log_dir=tb_log_path,
        # write_grads=True,
        write_images=True
        # histogram_freq=5
    )

    try:
        model.fit_generator(
                read_hdf5(
                        hdf5_file,
                        batch_size=batch_size,
                        ),
                steps_per_epoch=steps_per_epoch,
                epochs=epochs,
                verbose=1,
                callbacks=[history, tensorboard],
                validation_data=read_hdf5(
                        hdf5_file,
                        dataset="test",
                        batch_size=batch_size,
                        ),
                validation_steps=validation_steps
                )

        if validation:
            eva_data = hdf5_file.root.val_img[...]
            eva_labels = hdf5_file.root.val_labels[...]
            print("Validation data shape: {}".format(str(eva_data.shape)))
            print("validation labels shape: {}".format(str(eva_labels.shape)))
            preds = model.evaluate(eva_data, eva_labels)
            print("Validation loss: {}".format(preds[0]))
            print("Validation accuracy: {}".format(preds[1]))

    except StopIteration:
        pass
    finally:
        # a single close in finally covers both the normal and early-stop paths
        hdf5_file.close()

    print("Training time: ", datetime.now() - start_time)
Example #28
def train(
    model_name,
    batch_size=32,
    epochs=10,
    classes=2,
    n_gpu=8,
    validation=True
):
    """
    """

    start_time = datetime.now()
    paths = Paths()
    hdf5_path = os.path.join("..", "data", "76_79_80.hdf5")
    if n_gpu > 1:
        # create model on cpu
        print("Using {} gpus...".format(n_gpu))
        with tf.device("/cpu:0"):
            model = generate_model(
                model_name,
                input_shape=(299, 299, 3),
                include_top=False,
                weights=None,
                classes=classes,
                pooling="avg"
            )
            print("Generated model on cpu...")
            sys.stdout.flush()

        # replicate the model to use multiple gpus
        model = multi_gpu_model(model, gpus=n_gpu)
        print("Created parallel model...")
        sys.stdout.flush()

    else:
        print("Using single gpu...")
        model = generate_model(
            model_name,
            input_shape=(299, 299, 3),
            include_top=False,
            weights=None,
            classes=classes,
            pooling="avg"
        )
        print("Generated model on cpu...")
        sys.stdout.flush()

    model.compile(
            optimizer="adam",
            loss="categorical_crossentropy",
            metrics=["accuracy"]
            )
    print("Model compiled...")
    sys.stdout.flush()

    timestamp = datetime.now().strftime(r"%Y%m%d_%I%M%p")
    tb_log_path = os.path.join(
        paths.logs, '{}_logs_{}'.format(model_name, timestamp))
    os.makedirs(tb_log_path, exist_ok=True)
    os.makedirs(paths.models, exist_ok=True)

    epoch_loss_path = os.path.join(
        paths.logs,
        "{}_epoch_loss_{}.log".format(model_name, timestamp)
    )
    batch_loss_path = os.path.join(
        paths.logs,
        "{}_batch_loss_{}.log".format(model_name, timestamp)
    )
    model_hdf5_path = os.path.join(
        paths.models,
        "{}_{}.hdf5".format(model_name, timestamp)
    )
    history = LossHistory(
        epoch_loss_path,
        batch_loss_path,
        model_hdf5_path
    )

    tensorboard = TensorBoard(log_dir=tb_log_path)
    print("Start training...")
    sys.stdout.flush()

    data_params = {
        "batch_size": batch_size,
        "n_classes": classes,
        "shuffle": True
    }
    train_generator = DataGenerator(hdf5_path, "train", **data_params)
    valid_generator = DataGenerator(hdf5_path, "test", **data_params)
    if n_gpu == 1:
        model.fit_generator(
            train_generator,
            epochs=epochs,
            verbose=2,
            callbacks=[history, tensorboard],
            validation_data=valid_generator
            # use_multiprocessing = True,
            # workers = 3
            # max_queue_size = 5
            )
    elif n_gpu > 1:
        model.fit_generator(
            train_generator,
            epochs=epochs,
            verbose=2,
            callbacks=[history, tensorboard],
            validation_data=valid_generator,
            use_multiprocessing=True,
            workers=3
            # max_queue_size = 5
        )

    if validation:
        pred_generator = DataGenerator(hdf5_path, "val", **data_params)
        preds = model.evaluate_generator(
                pred_generator
                # use_multiprocessing = False,
                # workers = 8
                )
        print("Validation loss: {}".format(preds[0]))
        print("Validation accuracy: {}".format(preds[1]))

    time_consumed = datetime.now() - start_time
    hours = time_consumed.seconds // 3600
    minutes = time_consumed.seconds % 3600 // 60
    seconds = time_consumed.seconds % 60
    print("Training time: {}h{}m{}s".format(hours, minutes, seconds))
Example #29
# distances is assumed to be defined earlier in the source script
# (an m-by-m matrix of pairwise connectivity weights)
distances = np.max(distances) - distances

[m, _] = np.shape(distances)

heap = []

k = 1

connections = np.zeros((m, m), dtype=int)

with open('1-best-connectivity-path.txt', 'w') as file:
    for start in range(m):
        print(start)

        ps = Paths(k, m)

        p = BrainPath(start)
        heappush(heap, p)

        while len(heap) > 0:
            path: BrainPath = heappop(heap)
            for next_node in range(m):
                if next_node not in path.elements:
                    new_path = path.add(next_node, distances[path.last(),
                                                             next_node])
                    if ps.add(new_path):
                        heappush(heap, new_path)
        file.write(ps.str(start) + '\n')

        connections += ps.matrix_representation()
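
A hedged follow-up (not in the source): once the outer loop finishes, the accumulated connections matrix could be persisted for later analysis; the filename is an assumption.

np.savetxt('1-best-connectivity-matrix.txt', connections, fmt='%d')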
Example #30
# PREPROCESSOR AM-CTAC
# Script for the preprocessing of URLs from tickets for the Analyzer Module
# ____________________________________________________________________________
# ____________________________________________________________________________
# Configuration:
#
# The only parameter that has to be configured is the hours value, which
# represents the interval of time over which the script is executed.
#
# _____________________________________________________________________________
from clients import Clients
from tickets import Tickets
from ticketspaths import Ticketpaths
from paths import Paths


#                                the only configurable parameter (hours)
# Tables                                 |
# ticketpaths                            V
ticpaths = Ticketpaths(Clients(), Tickets(8))
paths = Paths(ticpaths)

# Client score and recurrence
paths.paths_recurrence()
paths.paths_client_score()

# Insert data
# ticketpaths
ticpaths.tp_insert_data()
# paths
paths.paths_insert_data()