Example 1
 def __init__(self, path, depth):
     self.clf = tree.DecisionTreeClassifier(max_depth=depth)
     self.file_io = FileIO()
     #self.pca = PCAProcess()
     #self.chart = DrawChart()
     self.test = Test()
     self.file_path = path
Example 2
 def start_infinity_job(self, infinity_instructions):
     """Start a job and save instructions to a variable (infinity_data) so they can be perpetually run with :meth:`start_job`
     """
     if debug == True: FileIO.log('instruction_queue.start_infinity_job called')
     if infinity_instructions and len(infinity_instructions):
         self.infinity_data = json.dumps(infinity_instructions,sort_keys=True,indent=4,separators=(',',': '))
         self.start_job(infinity_instructions, True)
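Several snippets in this collection assume a FileIO with static helpers for logging, JSON loading, and file writing. Below is a minimal sketch consistent with the calls used here (FileIO.log, FileIO.get_dict_from_json, FileIO.writeFile); it is an assumption for illustration, not the project's actual implementation:

import json

class FileIO:
    @staticmethod
    def log(*args):
        # print all arguments on one line, like the debug traces above
        print(' '.join(str(a) for a in args))

    @staticmethod
    def get_dict_from_json(path):
        # load a JSON file into a dict
        with open(path) as f:
            return json.load(f)

    @staticmethod
    def writeFile(filename, filetext, onerror=None):
        # write text to a file, invoking the callback on failure
        try:
            with open(filename, 'w') as f:
                f.write(filetext)
        except OSError:
            if onerror is not None:
                onerror()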
Example 3
 def erase_job(self, data):
     """Call :meth:`clear`... redundant, consider removing, and why does it have unused data parameter???
     """
     if debug == True: FileIO.log('the_queue.erase_job called')
     #doesn't map to smoothieAPI
     #function eraseJob(){
     self.clear() 
Example 4
    def onJoin(self, details):
        """Callback fired when WAMP session has been established.

        May return a Deferred/Future.

        Starts instantiation of robot objects by calling :meth:`otone_client.instantiate_objects`.
        """
        if debug == True: FileIO.log('otone_client : WampComponent.onJoin called')
        if not self.factory._myAppSession:
            self.factory._myAppSession = self
        
        global crossbar_status
        crossbar_status = True
        instantiate_objects()
        
        
        def set_client_status(status):
            if debug == True: FileIO.log('otone_client : WampComponent.set_client_status called')
            global client_status
            client_status = status
            self.publish('com.opentrons.robot_ready',True)
        
        FileIO.log('about to publish com.opentrons.robot_ready TRUE')
        self.publish('com.opentrons.robot_ready',True)
        yield from self.subscribe(set_client_status, 'com.opentrons.browser_ready')
        yield from self.subscribe(subscriber.dispatch_message, 'com.opentrons.browser_to_robot')
Example 5
def write_led(num, val):
    """Turn an LED on or off.

    Not currently implemented. This is in anticipation of having LED indicators.
    """
    if debug == True: FileIO.log('script_keeper.write_led called')
    subprocess.call([os.path.join(dir_path,'../../otone_scripts/write_led.sh'),str(num),str(val)])
Example 6
 def erase_job(self):
     """Erase the ProtocolRunner job
     """
     if debug == True: FileIO.log('instruction_queue.erase_job called')
     self.head.erase_job()
     self.isRunning = False
     self.instructionArray = []
Example 7
 def resume_job(self):
     """Call :meth:`resume`... redundant, consider removing
     """
     if debug == True: FileIO.log('the_queue.resume_job called')
     #doesn't map to smoothieAPI
     #function resumeJob()
     self.resume()
Example 8
    def __init__(self, axis):
        """Initialize Pipette

        toolname = the name of the tool (string)
        tooltype = the type of tool e.g. 1ch pipette, 8ch pipette, etc.(string)
        axis = position of tool on head & associated 
                motor (A, B, C, etc) (string)
        offset = the offset in space from the A tool which is defined to
            have offset = (0,0,0)
        """
        if debug == True: FileIO.log('pipette.__init__ called')
        toolname = axis + '_pipette'
        super().__init__(toolname, 'pipette', axis)

        #default parameters to start with
        self.resting = 0  #rest position of plunger when not engaged
        self.top = 0   #just touching the plunger (saved)
        self.bottom = 1  #calculated by pipette object (saved)
        self.blowout = 2  #value saved 2-5 mm before droptip (saved)
        self.droptip = 4  #complete max position of plunger (saved)

        #the calibrated plate offsets for this pipette

        self.volume = 200  #max volume pipette can hold calculated during calibration (saved in pipette_calibrations.json)
        self.bottom_distance = 2 #distance between blowout and bottom

        self.theContainers = {} #(saved)
        self.tip_racks = []
        self.trash_container = []
        self.tip_rack_origin = ""
Example 9
 def publish_calibrations(self):
     """Publish calibrations data
     """
     if debug == True: FileIO.log('head.publish_calibrations called')
     self.pubber.send_message('containerLocations',self.get_deck())
     self.pubber.send_message('pipetteValues',self.get_pipettes())
     
Example 10
def demo(code='N225',
         name='日経平均株価',
         start='2014-01-01',
         days=240,
         csvfile=os.path.join(os.path.dirname(
             os.path.abspath(__file__)),
             '..',
             'test',
             'stock_N225.csv'),
         update=False):

    # Handling ti object example.
    io = FileIO()
    stock_d = io.read_from_csv(code,
                               csvfile)
    ti = TechnicalIndicators(stock_d)
    ti.calc_ret_index()

    print(ti.stock['ret_index'].tail(10))
    io.save_data(io.merge_df(stock_d, ti.stock),
                 code, 'demo_')

    # Run analysis code example.
    analysis = Analysis(code=code,
                        name=name,
                        start=start,
                        days=days,
                        csvfile=csvfile,
                        update=update)
    return analysis.run()
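The stock-analysis snippets use an instance-based FileIO with CSV helpers. A minimal sketch matching the calls above (read_from_csv, save_data, merge_df); the signatures are inferred from usage and the real class may differ:

import pandas as pd

class FileIO:
    def read_from_csv(self, code, csvfile):
        # load a date-indexed CSV of price data
        return pd.read_csv(csvfile, index_col=0, parse_dates=True)

    def save_data(self, df, code, prefix):
        # write the frame to the working directory, e.g. demo_N225.csv
        df.to_csv('{}{}.csv'.format(prefix, code))

    def merge_df(self, left, right):
        # join two frames on their shared date index
        return pd.concat([left, right], axis=1)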
Example 11
def task_gen_top_tag_via_number(query, K, root):
    from photo_dao import PhotoDao
    from database import DBHelper
    db_helper = DBHelper()
    db_helper.init(root)

    photo_dao = PhotoDao(db_helper)
    photo_ids = photo_dao.getClassPhotoIds(query, ''.join([query]))

    photos = photo_dao.getPhotos(query, photo_ids)

    hist = {}
    for photo in photos:
        tags = photo.tags
        for tag in tags:
            if tag in hist:
                hist[tag] = hist[tag] + 1
            else:
                hist[tag] = 1  # count the first occurrence too
    top_word_freq = sorted(hist.items(), key=lambda t: -t[1])
    top_word_freq = top_word_freq[0:min(len(top_word_freq), K)]
    top_word = []
    for line in top_word_freq:
        top_word.append(line[0].strip())

    output_path = ''.join([root, '/data/tags/%s.txt'%query])
    from file_io import FileIO
    file_io = FileIO()
    file_io.write_strings_to_file(top_word, output_path)

    return top_word
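A hypothetical invocation of this task (the query and root path are assumed for illustration):

# count tag frequencies for the "car" class and keep the 10 most frequent
top_tags = task_gen_top_tag_via_number('car', 10, '/srv/photo_db')
print(top_tags[:3])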
Example 12
 def home(self, data):
     """Intermediate step to start a homing sequence
     """
     if debug == True: FileIO.log('subscriber.home called')
     self.runner.insQueue.infinity_data = None
     self.runner.insQueue.erase_job()
     self.head.home(data)
Example 13
def share_inet():
    """Triggers ethernet interface (eth0) to try to obtain ip address via dhcp by taking it down, and then 
    bringing it up
    """
    if debug == True: FileIO.log('script_keeper.share_inet called')
    cmd = os.path.join(dir_path,'../../otone_scripts/share_inet.sh')

    create_share = asyncio.create_subprocess_exec(cmd,stdout=asyncio.subprocess.PIPE)

    # awaiting the same coroutine twice would fail, so await it once and
    # then poll the process until it reports a return code
    proc_share = yield from create_share

    criterion = True
    while criterion == True:
        stdout_, stderr_ = yield from proc_share.communicate()

        if stdout_ is not None:
            stdout_str = stdout_.decode("utf-8")
            if debug == True and verbose == True: FileIO.log('share_inet.stdout... '+stdout_str)
            read_progress(stdout_str)
        else:
            if debug == True and verbose == True: FileIO.log('share_inet.stdout... None')
        if stderr_ is not None:
            if debug == True and verbose == True: FileIO.log('share_inet.stderr...'+stderr_.decode("utf-8"))
        else:
            if debug == True and verbose == True: FileIO.log('share_inet.stderr... None')
        if proc_share.returncode is not None:
            criterion = False
    return
Example 14
    def __init__(self, tools, publisher):
        """Initialize Head object
        
        tools = dictionary of the tools on the head
        publisher = the publisher used to send messages and receive callbacks
        
        """
        if debug == True: FileIO.log('head.__init__ called')
        self.smoothieAPI = openSmoothie.Smoothie(self)
        self.PIPETTES = {'a':Pipette('a'),'b':Pipette('b')}    #need to create this dict in head setup
        self.tools = tools
        self.pubber = publisher
        self.smoothieAPI.set_raw_callback(self.pubber.on_raw_data)
        self.smoothieAPI.set_position_callback(self.pubber.on_position_data)
        self.smoothieAPI.set_limit_hit_callback(self.pubber.on_limit_hit)
        self.smoothieAPI.set_move_callback(self.pubber.on_start)
        self.smoothieAPI.set_delay_callback(self.pubber.show_delay)
        self.theQueue = TheQueue(self, publisher)
        
        #connect with the smoothie board
        self.smoothieAPI.connect()
        self.path = os.path.abspath(__file__)
        self.dir_path = os.path.dirname(self.path)  
        self.dir_par_path = os.path.dirname(self.dir_path)
        self.dir_par_par_path = os.path.dirname(self.dir_par_path)      

        self.load_pipette_values()
Example 15
 def raw(self, string):
     """Send a raw command to the Smoothieboard
     """
     if debug == True: FileIO.log('head.raw called')
     #maps to smoothieAPI.raw()
     #function raw(string)
     self.smoothieAPI.raw(string)
Example 16
 def run_program(self,progName,ctrl=None,lid=None,vesselType=None,vesselVol=None):
     """run a named program in the cycler's files
     Name must be in quotes
     specify control method BLOCK, PROBE, or CALC. CALC by default
     specify heated lid on or off. On by default
     specify vessel type ('"Tubes"' or '"Plate"') and volume (10-100)
     Returns True if successful, False if not
     """
     # first check if program exists and exit program if not
     if not self.find_program(progName):
         print("Program does not exist")
         return False
     # cancel other programs
     if self.check_busy():
         self.cancel()
     # make sure lid is closed
     self.close_lid()
     # control method is CALC by default, lid ON by default
     ctrl = ctrl or self.ctrl
     lid = lid or self.lid
     # set temp calc algorithm parameters
     self.set_calc(vesselType,vesselVol)
     # send run command to cycler
     sendStr = self.format_input(['RUN '+progName,ctrl,lid],',')
     if debug == True: FileIO.log('cycler.py sending {0}'.format(sendStr))
     self.send(sendStr)
     return True
Example 17
 def delay_state(self):
     """Sets theState object's delaying value to 0, and then calls :meth:`on_state_change`.
     Used by :meth:`delay` for timing end of a delay
     """
     if debug == True: FileIO.log('smoothie_ser2net.delay_state called')
     self.theState['delaying'] = 0
     self.on_state_change(self.theState)
Example 18
 def reset(self):
     """Reset robot
     """
     if debug == True: FileIO.log('smoothie_ser2net.reset called')
     if self.my_transport is not None:
         resetString = _dict['reset']
         self.send(resetString)
Example 19
    def on_connect(self, theState):
        """Callback when connection made

        Currently does nothing.
        """
        if debug == True:
            FileIO.log('smoothie_ser2net.on_connect called')
Example 20
 def move_pipette(self, data):
     """Tell the :class:`head` to move a :class:`pipette` 
     """
     if debug == True: FileIO.log('subscriber.move_pipette called')
     axis = data['axis']
     property_ = data['property']
     self.head.move_pipette(axis, property_)
Example 21
 def __init__(self, con_path, char_type):
     # initialization
     self.file_io = FileIO()
     self.extract_col = ExtractColumns()
     self.con_path = con_path
     # open the file
     self.con = self.file_io.open_file_as_pandas(con_path, char_type)
Example 22
 def reset(self):
     """Reset the Smoothieboard and clear theQueue object (:class:`the_queue`)
     """
     if debug == True: FileIO.log('head.reset called')
     #maps to smoothieAPI.reset() with extra code
     self.smoothieAPI.reset()
     self.theQueue.clear()
Example 23
 def try_add(self, cmd):
     """Add a command to the smoothieQueue
     """
     FileIO.log('smoothie_ser2net.try_add called')
     self.smoothieQueue.append(cmd)
     #if len(self.smoothieQueue) == 1:
     self.try_step()
Example 24
 def get_state(self):
     """Get state information from Smoothieboard
     """
     if debug == True: FileIO.log('head.get_state called')
     #maps to smoothieAPI.get_state()
     #function get_state ()
     return self.smoothieAPI.get_state()
Example 25
 def move_plunger(self, data):
     """Tell the :class:`head` to move a :class:`pipette` to given location(s)
     """
     if debug == True:
         FileIO.log('subscriber.move_plunger called')
         if verbose == True: FileIO.log('\ndata:\n\t',data,'\n')
     self.head.move_plunger(data['axis'], data['locations'])
Example 26
 def calibrate_container(self, pipette, container):   
     """Set the location of a container
     """
     if debug == True: FileIO.log('head.calibrate_container called')
     if pipette and self.PIPETTES[pipette]:     
         state = self.smoothieAPI.get_state()
         self.PIPETTES[pipette].calibrate_container(container,state)
Example 27
 def connect(self):
     """Make a connection to Smoothieboard using :class:`CB_Factory`
     """
     if debug == True: FileIO.log('smoothie_ser2net.connect called')
     self.my_loop = asyncio.get_event_loop()
     callbacker = self.CB_Factory(self)
     # asyncio.async() collides with the async keyword on modern Python
     asyncio.ensure_future(self.my_loop.create_connection(lambda: callbacker, host='0.0.0.0', port=3333))
Example 28
    def create_deck(self, new_deck):
        """Create a dictionary of new container names to be stored in each pipette given a deck list

        Calls :meth:`head.save_pipette_values` right before returning dictionary

        :returns: container data for each axis
        :rtype: dictionary
        
        """
        if debug == True: 
            FileIO.log('head.create_deck called')
            if verbose == True:
                FileIO.log('\tnewDeck:\n\n', new_deck,'\n')
        
        #doesn't map to smoothieAPI
        nameArray = []  

        for containerName in new_deck:
            nameArray.append(containerName) 
        
        response = {}  
        
        for n in self.PIPETTES:
            response[n] = self.PIPETTES[n].create_deck(nameArray)  

        self.save_pipette_values() 
        return response         
Example 29
    def __init__(
        self,
        id_path,
        cust_payment_path,
        cust_attr_path,
        product_attr_path,
        cust_path,
        cancel_path,
        contact_path,
        cti_path,
        register_type_path,
        status_path,
        stay_time_path,
        pv_sum_path,
        session_path,
        char_type):

        self.file_io = FileIO()
        self.encode = CategoryEncode()
        self.count_rec = CountRecord()
        self.extract_col = ExtractColumns()
        # open the files
        self.id = self.file_io.open_file_as_pandas(id_path,char_type)
        self.cust_payment = self.file_io.open_file_as_pandas(cust_payment_path, char_type)
        self.cust_attr = self.file_io.open_file_as_pandas(cust_attr_path, char_type)
        self.product_attr = self.file_io.open_file_as_pandas(product_attr_path, char_type)
        self.cust = self.file_io.open_file_as_pandas(cust_path, char_type)
        self.cancel = self.file_io.open_file_as_pandas(cancel_path, char_type)
        self.contact = self.file_io.open_file_as_pandas(contact_path, char_type)
        self.cti = self.file_io.open_file_as_pandas(cti_path, char_type)
        self.register_type = self.file_io.open_file_as_pandas(register_type_path, char_type)
        self.status = self.file_io.open_file_as_pandas(status_path, char_type)
        self.stay_time = self.file_io.open_file_as_pandas(stay_time_path, char_type)
        self.pv_sum = self.file_io.open_file_as_pandas(pv_sum_path, char_type)
        self.session = self.file_io.open_file_as_pandas(session_path, char_type)
Example 30
    def on_state_change(self, state):
        """Check the given state (from Smoothieboard) and engage :obj:`theQueue` (:class:`the_queue`) accordingly

        If stat is 1 or delaying is 1, :obj:`theQueue` is marked busy;

        if stat is 0 and delaying is 0, :obj:`theQueue` is marked not busy,
        the currentCommand is cleared for the next one, and, if not paused,
        :obj:`theQueue` is told to step. Then :obj:`theState` is updated.

        :todo:
        :obj:`theState` should be updated BEFORE the actions taken from the given state
        """
        if debug == True: FileIO.log('head.on_state_change called')
        
        if state['stat'] == 1 or state['delaying'] == 1:
            self.theQueue.is_busy = True

        elif state['stat'] == 0 and state['delaying'] == 0:
            self.theQueue.is_busy = False
            self.theQueue.currentCommand = None
            if not self.theQueue.paused:
                self.theQueue.step(False)
    
        self.theState = state
        if debug == True and verbose == True: FileIO.log('\n\n\tHead state:\n\n',self.theState,'\n')
Example 31
    def end_sequence(self):
        """Returns the end pipetting sequence when running pipette command - currently an empty dictionary
        """
        if debug == True: FileIO.log('pipette.end_sequence called')
        oneCommand = {}

        return [oneCommand]
Example 32
def restart():
    """Restart the Crossbar WAMP router and Python backend.

    By default, does not change networking configuration.
    """
    if debug == True: FileIO.log('script_keeper.restart called')
    subprocess.call([os.path.join(dir_path,'../../otone_scripts/start.sh'), 'NOCHANGE'])
Example 33
 def instructions(self, data):
     """Intermediate step to have :class:`prtocol_runner` and :class:`the_queue` start running a protocol
     """
     if debug == True:
         FileIO.log('subscriber.instructions called')
         if verbose == True: FileIO.log('\targs: ', data,'\n')
     if data and len(data):
         self.runner.insQueue.start_job(data, True)
Example 34
def test_read_csv():
    io = FileIO()
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'stock_N225.csv')
    df = io.read_from_csv("N225", filename)

    # .ix is removed in modern pandas; .loc handles the label-based access
    result = round(df.loc['2015-03-20', 'Adj Close'], 2)
    expected = 19560.22
    eq_(expected, result)
Example 35
 def __init__(self, in_path, in_char, payment_path, out_char,
              cust_attr_path, product_attr_path):
     self.count_rec = CountRecord()
     self.file_io = FileIO()
     self.in_path = in_path
     self.in_char = in_char
     self.payment_path = payment_path
     self.out_char = out_char
     self.cust_attr_path = cust_attr_path
     self.product_attr_path = product_attr_path
Example 36
 def __init__(self):
     self.lr = LinearRegression()
     self.file_io = FileIO()
     #self.pca = PCAProcess()
     #self.chart = DrawChart()
     self.test = Test()
     self.individual = IndividualTest()
     self.sc = StandardScaler()
     self.ms = MinMaxScaler()
     self.drop_na = DropNaN()
Example 37
def main(file_name):
    file_io = FileIO(file_name)
    corpus = file_io.read()
    tc = TextCleanser()

    corpus_words = tc.clean_text(corpus)  # Get a list of every word in the corpus
    text_gen = TextGenerator(corpus_words)

    ui = UserInterface(text_gen)
    ui.runProgram()  # Starts the program
Example 38
 def __init__(self, in_path, in_char, payment_path, out_char,
              cust_attr_path, target_attr_path, average_attr_path):
     self.count_rec = CountRecord()
     self.file_io = FileIO()
     self.in_path = in_path
     self.in_char = in_char
     self.payment_path = payment_path
     self.out_char = out_char
     self.cust_attr_path = cust_attr_path
     self.target_attr_path = target_attr_path
     self.average_attr_path = average_attr_path
Example 39

    def __init__(self, root_cat, depth, log_file, output_dir, root_dir):
        # init logger
        self._logger = mylogger.get_logger(
            DistantExtractor.__name__,
            log_file,
            mylogger.DEBUG
        )
        io_logger = mylogger.get_logger(
            FileIO.__name__,
            log_file,
            mylogger.DEBUG
        )
        wiki_logger = mylogger.get_logger(
            WikipediaExtractor.__name__,
            log_file,
            mylogger.DEBUG
        )
        morph_logger = mylogger.get_logger(
            MorphemeTagger.__name__,
            log_file,
            mylogger.DEBUG
        )
        
        # init instance
        self._file_io = FileIO(output_dir, io_logger)
        self._wiki_extractor = WikipediaExtractor(wiki_logger, self._file_io)
        self._morpheme_tagger = MorphemeTagger(morph_logger, root_dir)
        
        # init args
        self._root_cat = root_cat
        self._limit_depth = depth
        #TODO: eventually allow holding seeds for multiple classes;
        # make a dict with the name as key and the seeds (a list) as value.
        # The labeling step already works that way.
        self._seed_name = 'Car'
        self._seeds = list()
        self._categories = [self._root_cat]

        # init name
        self._seed_dir = 'seeds'
        self._unlabeled_dir = 'unlabeled_corpora'
        self._cleaned_dir = 'cleaned_corpora'
        self._mecab_dir = 'mecab_corpora'
        self._labeled_dir = 'labeled_corpora'
        self._train_dir = 'train_corpora'
        self._output = 'output'
        self._temp_dir = 'temp'
        self._templatefile = '%s/templates/template' % root_dir
        self._trainfile = '%s/train.txt' % output_dir
        self._decodefile = '%s/decode.txt' % output_dir
        self._modelfile = '%s/model' % output_dir
        self._all_labeledfile = '%s/all_labeled.txt' % output_dir
Example 40
def test_save_data():
    stock = testdata()
    io = FileIO()

    filename = 'test_N225.csv'

    io.save_data(stock, "N225", "test_")

    expected = True
    eq_(expected, os.path.exists(filename))

    if os.path.exists(filename):
        os.remove(filename)
Example 41
    def __init__(self):
        #self.lr = LinearRegression()
        self.file_io = FileIO()
        #self.pca = PCAProcess()
        #self.chart = DrawChart()
        self.test = Test()
        self.individual = IndividualTest()
        self.sc = StandardScaler()
        self.ms = MinMaxScaler()
        self.drop_na = DropNaN()

        self.droplist = []
        with open('droplist.txt') as f:
            self.droplist = [s.strip() for s in f.readlines()]
Example 42

    def run_look_up(self):
        lines = FileIO.read_file_to_lines(self.file_path)
        # splitting labels and sentences
        sentences = [
            l.split('|sep|')[1] for l in lines if len(l.split('|sep|')) > 1
        ]
        print(len(sentences))
        print('## First sentence')
        print(sentences[0])
        print('# char', len(sentences[0]))

        avg_char = 0
        for s in sentences:
            avg_char += len(s)
        print('avg char:', avg_char / len(sentences))
        sentences_in_ascii = [
            self.sentence_to_ascii_list_look_up(sentence)
            for sentence in sentences
        ]
        print('## First sentence in ascii codes')
        print(sentences_in_ascii[0])
        print()

        sentences_in_ascii = self.fixing_dimension(sentences_in_ascii)
        sentences_in_vector = self.generate_look_up_vector(sentences_in_ascii)
        labels = [l.split('|sep|')[0] for l in lines]
        return labels, sentences_in_vector
Example 43
File: pca.py Project: Logtre/Xscore
class PCAProcess:
    def __init__(self):
        self.file_io = FileIO()
        self.pca = PrincipleComponentAnalysis()
        self.chart = DrawChart()

    def pca_process(self, file_path, dim_number):
        # open the file
        org_df = self.file_io.open_file_as_pandas(file_path, "utf-8")
        # target Y: total of non-cash and cash payments
        Y = org_df['現金外支払合計'] + org_df['現金支払合計']
        print("Y's shape is {}".format(Y.shape))

        # drop the unneeded customer ID and the payment totals
        #df = org_df.drop(columns='顧客ID')
        X = org_df.drop(['顧客ID', '現金支払合計', '現金外支払合計'], axis=1)
        print("X's shape is {}".format(X.shape))

        rd = self.pca.fit(X, dim_number)
        df_rd = self.pca.fit_transform(X, dim_number)

        # draw the chart
        self.chart.pca_scatter_plot(df_rd, Y)

        # print the explained variance ratio of the principal components
        print('Explained variance per component: {0}'.format(rd.explained_variance_ratio_))
        print('Cumulative explained variance: {0}'.format(sum(rd.explained_variance_ratio_)))

        return df_rd, Y
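A hypothetical usage of this class (the file name and dimension count are assumed for illustration):

pca_proc = PCAProcess()
# reduce the customer feature matrix to 2 principal components
df_rd, Y = pca_proc.pca_process('customer_payments.csv', 2)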
Example 44
    def load_pipette_values(self):
        """Load pipette values from data/pipette_calibrations.json
        """
        logger.debug('head.load_pipette_values called')
        old_values = FileIO.get_dict_from_json(
            os.path.join(self.dir_path,
                         'otone_data/pipette_calibrations.json'))
        logger.debug('old_values:\n')
        logger.debug(old_values)

        if self.PIPETTES is not None and len(self.PIPETTES) > 0:
            for axis in old_values:
                #for n in old_values[axis]:
                for k, v in old_values[axis].items():
                    self.PIPETTES[axis].__dict__[k] = v

                    # should include:
                    #  'resting'
                    #  'top'
                    #  'bottom'
                    #  'blowout'
                    #  'droptip'
                    #  'volume'
                    #  'theContainers'

            logger.debug('self.PIPETTES[{}]:\n\n'.format(axis))
            logger.debug(self.PIPETTES[axis])
        else:
            logger.debug(
                'head.load_pipette_values: No pipettes defined in PIPETTES')
Example 45
    def parse_map() -> (Map, State):
        """ Uses map file to create map object in game.
            :returns The map object and the init state"""

        map_array = FileIO.read_line_by_line(Consts.MAP_FILE)
        sizes = map_array.pop(0)
        h, w = int(sizes[0]), int(sizes[1])
        map_object = Map(h, w)

        butters = []  # Variables to read from map
        points = []
        robot = (0, 0)
        for j, row in enumerate(map_array):
            for i, col in enumerate(row):

                if len(col) > 1:  # If there is an object in map
                    if col[1] == 'b':
                        butters.append((j, i))
                    elif col[1] == 'p':
                        points.append((j, i))
                    elif col[1] == 'r':
                        robot = (j, i)
                    row[i] = col[0]

            map_object.append_row(row)  # Append row to map

        map_object.set_points(points)
        return map_object, State(robot, butters)
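A map file consistent with this parser might look like the following (assumed format: the first line holds the height and width, and a second character on a cell marks a butter 'b', a point 'p', or the robot 'r'):

2 3
1 1b 1
1r 1 1p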
Example 46
class SelectColumns:

    def __init__(self, con_path, char_type):
        # initialization
        self.file_io = FileIO()
        self.extract_col = ExtractColumns()
        self.con_path = con_path
        # open the file
        self.con = self.file_io.open_file_as_pandas(con_path, char_type)

    def select(self, **kwargs):
        # target column list
        tg_list = kwargs['extract_col'] + ['現金外支払合計','現金支払合計']
        # extract the target columns
        target_col = self.extract_col.extract(self.con, self.con['顧客ID'], extract_col=tg_list)
        # write the result to file
        self.file_io.export_csv_from_pandas(target_col, self.con_path)
Example 47
def instantiate_objects():
    """After connection has been made, instatiate the various robot objects
    """

    global perm_dir_path
    global dir_path

    logger.debug('instantiate_objects called')
    #get default json file
    def_start_protocol = FileIO.get_dict_from_json(
        os.path.join(dir_path, 'data/default_startup_protocol.json'))
    #FileIO.get_dict_from_json('/home/pi/PythonProject/default_startup_protocol.json')

    #instantiate the head
    head = Head(def_start_protocol['head'], publisher, perm_dir_path)
    logger.debug('head string: ')
    logger.debug(str(head))
    logger.debug('head representation: ')
    logger.debug(repr(head))
    #use the head data to configure the head
    head_data = {}
    head_data = prot_dict['head']  #extract the head section from prot_dict

    logger.debug("Head configured!")

    #instantiate the script keeper (sk)

    #instantiate the deck
    deck = Deck(def_start_protocol['deck'], publisher, perm_dir_path)
    logger.debug('deck string: ')
    logger.debug(str(deck))
    logger.debug('deck representation: ')
    logger.debug(repr(deck))

    runner = ProtocolRunner(head, publisher)

    #use the deck data to configure the deck
    deck_data = {}
    deck_data = prot_dict['deck']  #extract the deck section from prot_dict
    #    deck = RobotLib.Deck({})        #instantiate an empty deck
    deck.configure_deck(deck_data)  #configure the deck from prot_dict data
    logger.debug("Deck configured!")

    #do something with the Ingredient data
    ingr_data = {}
    ingr_data = prot_dict[
        'ingredients']  #extract the ingredient section from prot_dict
    ingr = Ingredients({})

    ingr.configure_ingredients(
        ingr_data)  #configure the ingredients from prot_dict data
    logger.debug('Ingredients imported!')

    publisher.set_head(head)
    publisher.set_runner(runner)
    subscriber.set_deck(deck)
    subscriber.set_head(head)
    subscriber.set_runner(runner)
Example 48
 def container_depth_override(self, container_name, new_depth):
     containers = FileIO.get_dict_from_json(os.path.join(self.dir_path,'otone_data/containers.json'))
     if container_name in containers and new_depth is not None:
         if 'locations' in containers[container_name]:
             containers[container_name]['locations']['depth'] = new_depth
             self.save_containers(containers)
             self.publish_containers()
         else:
             logger.error('error in deck.container_depth_override, locations not in containers--> {}'.format(container_name))
Example 49
def demo(code='N225',
         name='日経平均株価',
         start='2014-01-01',
         days=240,
         csvfile=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..',
                              'test', 'stock_N225.csv'),
         update=False):

    # Handling ti object example.
    io = FileIO()
    stock_d = io.read_from_csv(code, csvfile)
    ti = TechnicalIndicators(stock_d)
    ti.calc_ret_index()

    print(ti.stock['ret_index'].tail(10))
    io.save_data(io.merge_df(stock_d, ti.stock), code, 'demo_')

    # Run analysis code example.
    analysis = Analysis(code=code,
                        name=name,
                        start=start,
                        days=days,
                        csvfile=csvfile,
                        update=update)
    return analysis.run()
Example 50
    def save_pipette_values(self):
        """Save pipette values to otone_data/pipette_values.json
        """
        pipette_values = {}

        params_to_save = [
            'resting',
            'top',
            'bottom',
            'blowout',
            'droptip',
            'volume',
            'theContainers',
            'tip_racks',
            'trash_container',
            'tip_rack_origin'
        ]

        for axis in self.PIPETTES:
            pipette_values[axis] = {}
            for k, v in self.PIPETTES[axis].__dict__.items():
                # make sure we're only saving what we need from that pipette module
                if k in params_to_save:
                    pipette_values[axis][k] = v

            # should include:
            #  'top'
            #  'bottom'
            #  'blowout'
            #  'droptip'
            #  'volume'
            #  'theContainers'

        filetext = json.dumps(pipette_values,sort_keys=True,indent=4,separators=(',',': '))
        
        filename = os.path.join(self.dir_path,'otone_data/pipette_calibrations.json')

        # save the pipette's values to a local file, to be loaded when the server restarts
        FileIO.writeFile(filename,filetext,lambda: logger.debug('\t\tError saving the file:\r\r'))      
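Given the defaults in the Pipette example above and this params_to_save list, the saved pipette_calibrations.json would look roughly like this (illustrative values, one entry per axis):

{
    "a": {
        "resting": 0,
        "top": 0,
        "bottom": 1,
        "blowout": 2,
        "droptip": 4,
        "volume": 200,
        "theContainers": {},
        "tip_racks": [],
        "trash_container": [],
        "tip_rack_origin": ""
    }
}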
Example 51
def main():
    args = sys.argv[1:]

    # check the number of arguments
    if len(args) < 5 or (len(args) & 1) != 1:
        raise ValueError(f'Invalid number of arguments: {len(args)}')

    f_in = args[0]
    f_out = args[1]
    prof = args[2].upper()

    sections = []

    for i in range(3, len(args), 2):
        try:
            start = float(args[i])
            end = float(args[i + 1])
        except ValueError:
            raise ValueError(f'Incorrect input: [{args[i]}, {args[i + 1]}]')

        # validate the interval
        if start > end or start < 0:
            raise ValueError(f'Incorrect section: [{start}, {end}]')

        # skip duplicate intervals
        sec = Section(start, end)
        if sec not in sections:
            sections.append(sec)

    # for .zip files
    if f_in.endswith('.zip'):
        with zipfile.ZipFile(f_in, 'r') as zp:
            f_in = f_in[:-4] + '.csv'
            if f_in in zp.namelist():
                zp.extract(f_in)
            else:
                raise ValueError(f'File {f_in} was not found in archive')

    if prof == 'CPU':
        cpu = CPU(f_in, sections)
        cpu.filter()

        top_processes = cpu.get_top_processes()
        top_modules = cpu.get_top_modules()

        # write the results to file
        log_cpu(f_out, top_processes, top_modules)

    elif prof == 'FILE_IO':
        file_io = FileIO(f_in, sections)
        file_io.filter()

        top_durations = file_io.get_top_durations()
        top_sizes = file_io.get_top_sizes()

        # write the results to file
        log_file_io(f_out, top_durations, top_sizes)
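A hypothetical command line for this entry point (the script name is assumed; arguments are input file, output file, profiler kind, then start/end pairs):

python profiler_report.py trace.zip report.txt file_io 0.0 5.0 10.0 20.0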
Example 52
class DecisionTree:
    def __init__(self, path, depth):
        self.clf = tree.DecisionTreeClassifier(max_depth=depth)
        self.file_io = FileIO()
        #self.pca = PCAProcess()
        #self.chart = DrawChart()
        self.test = Test()
        self.file_path = path

    def analyze(self):
        # open the file
        org_df = self.file_io.open_file_as_pandas(self.file_path, "utf-8")

        # target variable Y and explanatory variables X
        Y = org_df['現金外支払合計'] + org_df['現金支払合計']
        X = org_df.drop(['顧客ID', '現金支払合計', '現金外支払合計'], axis=1)
        # normalize each column of X
        X_normal = X.apply(lambda x: (x - np.mean(x)) /
                           (np.max(x) - np.min(x)))

        # split into training and test data (30% test)
        X_train, X_test, Y_train, Y_test = self.test.make_train_test_data(
            X_normal, Y, 0.3)
        print(X_train.head())
        print("--- X_train's shape ---\n {}\n".format(X_train.shape))
        print(X_test.head())
        print("--- X_test's shape ---\n {}\n".format(X_test.shape))
        print(Y_train.head())
        print("--- Y_train's shape ---\n {}\n".format(Y_train.shape))
        print(Y_test.head())
        print("--- Y_test's shape ---\n {}\n".format(Y_test.shape))

        # run the analysis
        self.clf.fit(X_train, Y_train)
        # check the classification rate on the training data
        predicted = self.clf.predict(X_train)
        ratio = sum(predicted == Y_train) / len(Y_train)

        # compute accuracy
        # training data
        print(" --- train score ---\n {}\n".format(
            self.clf.score(X_train, Y_train)))
        # test data
        print(" --- test score ---\n {}\n".format(
            self.clf.score(X_test, Y_test)))

        return self.clf.score(X_train, Y_train), self.clf.score(X_test, Y_test)
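A hypothetical usage (the CSV path is assumed); analyze() prints the split shapes and returns the train and test scores:

dt = DecisionTree('customer_payments.csv', 3)
train_score, test_score = dt.analyze()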
Example 53
f = open(os.devnull, 'w')
sys.stdout = f
sys.stderr = f

from head import Head
from deck import Deck

from subscriber import Subscriber
from publisher import Publisher

from file_io import FileIO
from ingredients import Ingredients

from protocol_runner import ProtocolRunner

prot_dict = FileIO.get_dict_from_json(fname_default_protocol)

#VARIABLES

#declare global objects here
head = None
deck = None
runner = None
subscriber = None
publisher = None
def_start_protocol = None
client_status = False
crossbar_status = False

#Import and setup autobahn WAMP peer
from autobahn.asyncio import wamp, websocket
Example 54
class Modeller(ModellerSetting):
    def __init__(self):
        self.file_io = FileIO()
        self.visual = Visualization()
        self.logger = set_logger(LOGGER_PATH, self.LOGGER_FILE,
                                 self.LOGGER_LEVEL, __name__)
        ktf.set_session(get_session())

    def save_estimator(self, estimator, file_name):
        self.file_io.save_file(estimator, self.MODEL_PATH, file_name, 'joblib')

    def load_estimator(self, file_name):
        estimator = self.file_io.load_file(self.MODEL_PATH, file_name,
                                           'joblib')
        return estimator

    @staticmethod
    def notify_model_fitting_failure(message):
        send_message('Error in fitting model. {}'.format(message),
                     NOTIFIER_AGENT)

    def try_save_notify_exit(self,
                             func,
                             estimator,
                             x_data,
                             y_data,
                             *args,
                             model_file_name=None,
                             verb=False,
                             **kwargs):
        try:
            estimator = func(estimator, x_data, y_data, *args, **kwargs)
            if model_file_name is not None:
                self.save_estimator(estimator, model_file_name)
            if verb:
                self.logger.debug(
                    'Model {} fitting completed'.format(model_file_name))
            return estimator
        except KeyboardInterrupt:
            sys.exit()
        except Exception as e:
            self.notify_model_fitting_failure('{}. model_file_name: {}'.format(
                e, model_file_name))
            sys.exit()

    def set_feature_scaler(self, name, **kwargs):
        if name in self.SCALER_DICT:
            scaler = self.SCALER_DICT[name](**kwargs)
        else:
            self.logger.error('Unknown scaler name {}'.format(name))
            scaler = None
        return scaler

    @staticmethod
    def feature_scaling(scaler, x_data, fit=True):
        if fit:
            model = scaler.fit(x_data)
            return model
        else:
            transformed_data = scaler.transform(x_data)
            return transformed_data

    def set_estimator(self, name, **kwargs):
        if name in self.ESTIMATOR_DICT:
            estimator = self.ESTIMATOR_DICT[name](**kwargs)
        else:
            self.logger.error('Unknown estimator name {}'.format(name))
            estimator = None
        return estimator

    @staticmethod
    def _estimator_fit(estimator, x_data, y_data, **kwargs):
        estimator = estimator.fit(x_data, y_data, **kwargs)
        return estimator

    def pure_estimation(self,
                        estimator,
                        x_data,
                        y_data,
                        fit=True,
                        model_file_name=None,
                        verb=False,
                        **kwargs):
        if fit:
            estimator = self.try_save_notify_exit(
                self._estimator_fit,
                estimator,
                x_data,
                y_data,
                model_file_name=model_file_name,
                verb=verb,
                **kwargs)
            return estimator
        else:
            # ktf.get_session().run(tf.global_variables_initializer())
            estimated_data = estimator.predict(x_data)
            return estimated_data

    def model_residual(self,
                       estimator,
                       x_data,
                       y_data,
                       fit=False,
                       model_file_name=None,
                       verb=False,
                       **kwargs):
        if fit:
            estimator = self.pure_estimation(estimator, x_data, y_data, True,
                                             model_file_name, verb, **kwargs)
        estimated_data = self.pure_estimation(estimator, x_data, y_data, False,
                                              model_file_name, verb)
        residual_data = y_data - estimated_data
        return residual_data

    @staticmethod
    def metric_to_scorer(metric, **kwargs):
        return make_scorer(metric, **kwargs)

    def set_scorer(self, name, make_score=True, **kwargs):

        if name in self.SCORER_DICT:
            scorer = self.SCORER_DICT[name]
            if make_score:
                scorer = self.metric_to_scorer(
                    scorer['func'],
                    greater_is_better=scorer['greater_better'],
                    **kwargs)
            else:
                scorer = scorer['func']
        else:
            self.logger.error('Unknown scorer name {}'.format(name))
            scorer = None
        return scorer

    def model_scoring(self,
                      estimator,
                      x_data,
                      y_data,
                      metric,
                      fit=False,
                      model_file_name=None,
                      verb=False,
                      **kwargs):
        if fit:
            estimator = self.pure_estimation(estimator, x_data, y_data, True,
                                             model_file_name, verb, **kwargs)
        estimated_data = self.pure_estimation(estimator, x_data, y_data, False,
                                              model_file_name, verb)
        score = metric(y_data, estimated_data)
        return score

    @staticmethod
    def set_estimation_pipeline(scaler, estimator):
        estimator = Pipeline([('scaler', scaler), ('estimator', estimator)])
        return estimator

    def train_valid_evaluation(self,
                               estimator,
                               x_train,
                               y_train,
                               x_valid,
                               y_valid,
                               scorer,
                               fit=False,
                               model_file_name=None,
                               verb=False,
                               **kwargs):
        if fit:
            estimator = self.pure_estimation(estimator, x_train, y_train, True,
                                             model_file_name, verb, **kwargs)
        train_score = self.model_scoring(estimator, x_train, y_train, scorer,
                                         False)
        valid_score = self.model_scoring(estimator, x_valid, y_valid, scorer,
                                         False)
        score = {'train': train_score, 'valid': valid_score}
        return score

    def set_cv(self, cv_name, **kwargs):
        if cv_name in self.CV_DICT:
            cv = self.CV_DICT[cv_name](**kwargs)
        else:
            self.logger.error('Unknown cv name {}'.format(cv_name))
            cv = None
        return cv

    @staticmethod
    def _cross_validation(estimator, x_data, y_data, scorer, **kwargs):
        estimator = cross_validate(estimator,
                                   x_data,
                                   y_data,
                                   scoring=scorer,
                                   **kwargs)
        return estimator

    def cross_validation(self,
                         estimator,
                         x_data,
                         y_data,
                         scorer,
                         model_file_name=None,
                         verb=False,
                         **kwargs):
        estimator = self.try_save_notify_exit(self._cross_validation,
                                              estimator,
                                              x_data,
                                              y_data,
                                              scorer,
                                              model_file_name=model_file_name,
                                              verb=verb,
                                              **kwargs)
        return estimator

    def validation_curve(self, estimator, x_data, y_data, para_range_dict,
                         scorer, plot_file_name, **kwargs):
        para_name = list(para_range_dict.keys())[0]
        para_values = para_range_dict[para_name]
        train_score, valid_score = validation_curve(estimator,
                                                    x_data,
                                                    y_data,
                                                    para_name,
                                                    para_values,
                                                    scoring=scorer,
                                                    **kwargs)
        train_score = np.mean(train_score, axis=1)
        valid_score = np.mean(valid_score, axis=1)
        data = pd.DataFrame({
            **para_range_dict,
            **{
                'train_score': train_score,
                'valid_score': valid_score
            }
        })
        self.visual.time_series_plot(
            plot_file_name,
            data,
            para_name, ['train_score', 'valid_score'],
            title_dict={
                'title': 'Validation curve for parameter {}'.format(para_name),
                'x_title': para_name,
                'y_title': 'score'
            })
        return data

    def learning_curve(self, estimator, x_data, y_data, scorer, plot_file_name,
                       **kwargs):
        train_sizes, train_score, valid_score = learning_curve(estimator,
                                                               x_data,
                                                               y_data,
                                                               scoring=scorer,
                                                               **kwargs)
        data = pd.DataFrame({
            'train_size': train_sizes,
            'train_score': train_score,
            'valid_score': valid_score
        })
        self.visual.time_series_plot(plot_file_name,
                                     data,
                                     'train_size',
                                     ['train_score', 'valid_score'],
                                     title_dict={
                                         'title': 'Learning curve',
                                         'x_title': 'train_size',
                                         'y_title': 'score'
                                     })
        return data

    def para_search(self,
                    estimator,
                    x_data,
                    y_data,
                    method,
                    search_para,
                    scorer,
                    model_file_name=None,
                    verb=False,
                    **kwargs):
        searcher = self.SEARCHER_DICT[method]
        estimator = searcher(estimator, search_para, scoring=scorer, **kwargs)
        estimator = self.pure_estimation(estimator, x_data, y_data, True,
                                         model_file_name, verb)
        return estimator

    def hyperopt_search(self,
                        scaler_dict,
                        estimator_dict,
                        x_data,
                        y_data,
                        method,
                        search_para,
                        scorer,
                        model_file_name=None,
                        verb=False,
                        **kwargs):

        set_feature_scaler = self.set_feature_scaler
        set_estimator = self.set_estimator
        set_estimation_pipeline = self.set_estimation_pipeline

        def hyperopt_min_func(space):
            scaler_kwargs = scaler_dict['kwargs']
            estimator_kwargs = estimator_dict['kwargs']
            for param_key, param_value in space.items():
                if 'scaler' in param_key:
                    if param_value['dtype'] is None:
                        scaler_kwargs[param_key.replace(
                            'scaler__', '')] = param_value['dist']
                    else:
                        scaler_kwargs[param_key.replace(
                            'scaler__',
                            '')] = param_value['dtype'](param_value['dist'])
                if 'estimator' in param_key:
                    if param_value['dtype'] is None:
                        estimator_kwargs[param_key.replace(
                            'estimator__', '')] = param_value['dist']
                    else:
                        estimator_kwargs[param_key.replace(
                            'estimator__',
                            '')] = param_value['dtype'](param_value['dist'])
            if 'optimizer' in estimator_kwargs and 'lr' in estimator_kwargs:
                estimator_kwargs['optimizer'] = estimator_kwargs['optimizer'](
                    lr=estimator_kwargs['lr'])
                estimator_kwargs.pop('lr')
            scaler = set_feature_scaler(scaler_dict['name'],
                                        **scaler_dict['kwargs'])
            estimator = set_estimator(estimator_dict['name'],
                                      **estimator_dict['kwargs'])
            model = set_estimation_pipeline(scaler, estimator)
            score = -cross_val_score(
                model, x_data, y_data, scoring=scorer, **kwargs).mean()
            if 'nn' in estimator_dict['name'] or 'lstm' in estimator_dict[
                    'name'] or 'cnn' in estimator_dict['name']:
                K.clear_session()
            return score

        searcher = self.SEARCHER_DICT[method]
        trials = Trials()
        best = fmin(fn=hyperopt_min_func,
                    space=search_para,
                    algo=searcher,
                    max_evals=self.HYPEROPT_MAX_ITER,
                    trials=trials)
        log = trials.trials
        self.save_estimator(log, 'tmp_hyperopt_search_log.pkl')
        best_para = {key: best[key] for key in search_para.keys()}
        self.save_estimator(best_para, 'temp_hyperopt_best_para.pkl')
        scaler_kwargs = scaler_dict['kwargs']
        estimator_kwargs = estimator_dict['kwargs']
        for param_key, param_value in best_para.items():
            if 'scaler' in param_key:
                if search_para[param_key]['dtype'] is None:
                    scaler_kwargs[param_key.replace(
                        'scaler__',
                        '')] = search_para[param_key]['choice'][param_value]
                else:
                    scaler_kwargs[param_key.replace(
                        'scaler__',
                        '')] = search_para[param_key]['dtype'](param_value)
            if 'estimator' in param_key:
                if search_para[param_key]['dtype'] is None:
                    estimator_kwargs[param_key.replace(
                        'estimator__',
                        '')] = search_para[param_key]['choice'][param_value]
                else:
                    estimator_kwargs[param_key.replace(
                        'estimator__',
                        '')] = search_para[param_key]['dtype'](param_value)
        if 'optimizer' in estimator_kwargs and 'lr' in estimator_kwargs:
            estimator_kwargs['optimizer'] = estimator_kwargs['optimizer'](
                lr=estimator_kwargs['lr'])
            estimator_kwargs.pop('lr')
        scaler = set_feature_scaler(scaler_dict['name'],
                                    **scaler_dict['kwargs'])
        estimator = set_estimator(estimator_dict['name'],
                                  **estimator_dict['kwargs'])
        estimator = set_estimation_pipeline(scaler, estimator)
        estimator = self.pure_estimation(estimator, x_data, y_data, True,
                                         model_file_name, verb,
                                         **kwargs['fit_params'])
        return estimator, log
Example 55
def runMRJob(mrJobClass, outputFileName, inputFileList, args='-r hadoop'.split(), **kwargs):
    mrJob = mrJobClass(args=args)
    GeneralMethods.runCommand('rm -rf %s' % outputFileName)
    for l in mrJob.runJob(inputFileList=inputFileList, **kwargs):
        FileIO.writeToFileAsJson(l[1], outputFileName)
Example 56
 def __init__(self):
     self.file_io = FileIO()
     self.visual = Visualization()
     self.logger = set_logger(LOGGER_PATH, self.LOGGER_FILE,
                              self.LOGGER_LEVEL, __name__)
     ktf.set_session(get_session())
Example 57
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(),
              metrics=['accuracy'])
## END OF MODEL ##
loss = []
acc = []
val_acc = []
start_time = datetime.datetime.now()
for i in range(0, 20):
    history = model.fit(x_train, y_train, 128, epoch_step,
                        verbose=1, validation_data=(x_test, y_test))
    end_time = datetime.datetime.now()
    print(str(end_time - start_time))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    ## SAVE
    loss = loss + history.history['loss']
    acc = acc + history.history['acc']
    val_acc = val_acc + history.history['val_acc']
    print(len(loss))
    lines = []
    lines.append(str(end_time - start_time))
    lines.append(','.join([str(a) for a in loss]))
    lines.append(','.join([str(a) for a in acc]))
    lines.append(','.join([str(a) for a in val_acc]))
    FileIO.write_lines_to_file('./gpu_cnn_' + str(num_conv_block) + '_convB_6_layers.log', lines)
    model.save(
        './models/gpu_cnn_epoch_' + str((i + 1) * epoch_step) + 'ep_' + str(
            num_conv_block) + '_convB_6_layers.h5')
Example 58
def savefig(output_file):
    print('Saving figure: ', output_file)
    FileIO.createDirectoryForFile(output_file)
    plt.savefig(output_file, bbox_inches='tight')
    plt.clf()
Example 59
    def run(self):
        io = FileIO()
        will_update = self.update

        if self.csvfile:
            stock_tse = io.read_from_csv(self.code, self.csvfile)

            msg = "".join([
                "Read data from csv: ", self.code, " Records: ",
                str(len(stock_tse))
            ])
            print(msg)

            if self.update and len(stock_tse) > 0:
                index = pd.date_range(start=stock_tse.index[-1],
                                      periods=2,
                                      freq='B')
                ts = pd.Series(None, index=index)
                next_day = ts.index[1]
                t = next_day.strftime('%Y-%m-%d')
                newdata = io.read_data(self.code, start=t, end=self.end)

                msg = "".join([
                    "Read data from web: ", self.code, " New records: ",
                    str(len(newdata))
                ])
                print(msg)
                if len(newdata) < 1:
                    will_update = False
                else:
                    print(newdata.iloc[-1, :])

                stock_tse = stock_tse.combine_first(newdata)
                io.save_data(stock_tse, self.code, 'stock_')
        else:
            stock_tse = io.read_data(self.code, start=self.start, end=self.end)

            msg = "".join([
                "Read data from web: ", self.code, " Records: ",
                str(len(stock_tse))
            ])
            print(msg)

        if stock_tse.empty:
            msg = "".join(["Data empty: ", self.code])
            print(msg)
            return None

        if not self.csvfile:
            io.save_data(stock_tse, self.code, 'stock_')

        try:
            stock_d = stock_tse.asfreq('B').dropna()[self.days:]

            ti = TechnicalIndicators(stock_d)

            ti.calc_sma()
            ti.calc_sma(timeperiod=5)
            ti.calc_sma(timeperiod=25)
            ti.calc_sma(timeperiod=50)
            ti.calc_sma(timeperiod=75)
            ewma = ti.calc_ewma(span=5)
            ewma = ti.calc_ewma(span=25)
            ewma = ti.calc_ewma(span=50)
            ewma = ti.calc_ewma(span=75)
            bbands = ti.calc_bbands()
            sar = ti.calc_sar()
            draw = Draw(self.code, self.fullname)

            ret = ti.calc_ret_index()
            ti.calc_vol(ret['ret_index'])
            rsi = ti.calc_rsi(timeperiod=9)
            rsi = ti.calc_rsi(timeperiod=14)
            mfi = ti.calc_mfi()
            roc = ti.calc_roc(timeperiod=10)
            roc = ti.calc_roc(timeperiod=25)
            roc = ti.calc_roc(timeperiod=50)
            roc = ti.calc_roc(timeperiod=75)
            roc = ti.calc_roc(timeperiod=150)
            ti.calc_cci()
            ultosc = ti.calc_ultosc()
            stoch = ti.calc_stoch()
            ti.calc_stochf()
            ti.calc_macd()
            willr = ti.calc_willr()
            ti.calc_momentum(timeperiod=10)
            ti.calc_momentum(timeperiod=25)
            tr = ti.calc_tr()
            ti.calc_atr()
            ti.calc_natr()
            vr = ti.calc_volume_rate()

            ret_index = ti.stock['ret_index']
            clf = Classifier(self.clffile)
            train_X, train_y = clf.train(ret_index, will_update)
            msg = "".join(["Train Records: ", str(len(train_y))])
            print(msg)
            clf_result = clf.classify(ret_index)[0]
            msg = "".join(["Classified: ", str(clf_result)])
            print(msg)
            ti.stock.loc[ti.stock.index[-1], 'classified'] = clf_result

            reg = Regression(self.regfile, alpha=1, regression_type="Ridge")
            train_X, train_y = reg.train(ret_index, will_update)
            msg = "".join(["Train Records: ", str(len(train_y))])
            print(msg)
            base = ti.stock_raw['Adj Close'][0]
            reg_result = int(reg.predict(ret_index, base)[0])
            msg = "".join(["Predicted: ", str(reg_result)])
            print(msg)
            ti.stock.loc[ti.stock.index[-1], 'predicted'] = reg_result

            if len(self.reference) > 0:
                ti.calc_rolling_corr(self.reference)
                ref = ti.stock['rolling_corr']
            else:
                ref = []

            io.save_data(io.merge_df(stock_d, ti.stock), self.code, 'ti_')

            draw.plot(stock_d,
                      ewma,
                      bbands,
                      sar,
                      rsi,
                      roc,
                      mfi,
                      ultosc,
                      willr,
                      stoch,
                      tr,
                      vr,
                      clf_result,
                      reg_result,
                      ref,
                      axis=self.axis,
                      complexity=self.complexity)

            return ti

        except (ValueError, KeyError):
            msg = "".join(["Error occured in ", self.code])
            print(msg)
            return None
Example 60

stat_lines = []
adagrad = elephas_optimizers.Adagrad()
for i in range(0, 200):
    # Train Spark model
    # Initialize SparkModel from Keras model and Spark context
    spark_model = SparkModel(sc,
                             model,
                             mode='asynchronous',
                             frequency='epoch',
                             num_workers=1,
                             optimizer=adagrad)
    spark_model.train(rdd,
                      nb_epoch=num_epoch_in_one_step,
                      batch_size=batch_size,
                      verbose=0,
                      validation_split=0.1)
    score1 = model.evaluate(x_train, y_train, verbose=0)
    score2 = model.evaluate(x_test, y_test, verbose=0)
    print('#############################')
    print('Finished epochs', (i + 1) * num_epoch_in_one_step)
    print('Train accuracy:', score1[1])
    print('Test accuracy:', score2[1])
    print('#############################')
    stat_lines.append(
        str((i + 1) * 10) + ', ' + str(score1[1]) + ', ' + str(score2[1]))
    FileIO.write_lines_to_file('./cnn_1.log', stat_lines)
    if (i + 1) % 10 == 0 and i != 0:
        model.save('./models/cnn_1_' + str((i + 1) * 10) + 'ep.h5')
# sc.stop()
## END OF SPARK ##