Exemplo n.º 1
0
def main():
    """Entry point: start the NewsGrabber master process.

    Initialises global settings, starts the logger, IRC bot, uploader
    and service runner as daemon threads, then idles until
    ``settings.running`` is cleared.
    """
    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log('Starting NewsGrabber')

    # Make sure every working directory exists before any thread uses it.
    tools.create_dir(settings.dir_new_urllists)
    tools.create_dir(settings.dir_old_urllists)
    tools.create_dir(settings.dir_donefiles)
    tools.create_dir(settings.dir_ready)
    tools.create_dir(settings.dir_last_upload)

    # Missing target files are logged as errors but do not abort startup.
    if not os.path.isfile('rsync_targets'):
        settings.logger.log("Please add one or more rsync targets to file 'rsync_targets'", 'ERROR')
    if not os.path.isfile('rsync_targets_discovery'):
        settings.logger.log("Please add one or more discovery rsync targets to file 'rsync_targets_discovery'", 'ERROR')

    # All workers run as daemon threads, so they die with the main thread.
    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    settings.upload = upload.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.run_services = service.RunServices()
    settings.run_services.daemon = True
    settings.run_services.start()

    # Keep the main thread alive; the worker threads do the actual work.
    while settings.running:
        time.sleep(1)
Exemplo n.º 2
0
def pkl_path(folder, pid=None):
    """ Returns the path (and creates it, if necessary) to the stored
    pandas data file.

    Parameters
    ----------
    folder : str
        Path to the chain_root or the output_root
    pid : int
        Process ID in case of parallel execution

    Returns
    -------
    output_path : str
        Full file path to pickle file
    """
    # The same pid-test was previously evaluated twice in a row; a single
    # branch picks both the file name and the containing folder.
    if pid is None:
        # Main file with complete content
        filename = 'data.pkl'
        output_folder = os.path.join(folder, 'check_output')
    else:
        # Split file per process
        filename = 'data_' + str(pid) + '.pkl'
        output_folder = os.path.join(folder, 'check_output', 'data')

    tools.create_dir(output_folder, 'check_output')
    output_path = os.path.join(output_folder, filename)

    return output_path
Exemplo n.º 3
0
def main():
    """Entry point for the discovery master process.

    Starts logger, IRC bot, uploader and service runner as daemon
    threads, then idles until ``settings.running`` is cleared.
    """
    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log('Starting NewsGrabber')

    tools.create_dir(settings.dir_assigned_services)

    if not os.path.isfile(settings.target):
        # BUG FIX: the old `.format(**locals())` raised KeyError('settings')
        # because `settings` is a module, not a local variable; pass the
        # value explicitly instead.
        settings.logger.log(
            "Please add one or more discovery rsync targets to file "
            "'{target}'".format(target=settings.target), 'ERROR')

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    # Give the IRC connection time to settle before starting workers.
    time.sleep(30)
    settings.upload = service.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.run_services = service.RunServices()
    settings.run_services.daemon = True
    settings.run_services.start()

    while settings.running:
        time.sleep(1)
Exemplo n.º 4
0
    def run(self, show_img=True):
        """
        Run genetic algorithm

        Iterates ITERS generations of evaluate -> sort -> crossover ->
        select -> extend -> mutate.  Every 100 iterations the current
        best individual's image is saved (and optionally shown) and the
        mutation parameters may be switched to a random-exploration mode
        based on the score delta since the last checkpoint.

        Parameters
        ----------
        show_img : bool
            If True, display the best individual's image at each
            100-iteration checkpoint.
        """
        try:
            # prev_score tracks the best score at the previous checkpoint so
            # the improvement (delta) can be measured below.
            prev_score = INDIVIDUAL_SIZE
            mutation_chance = MUTATION_CHACE
            portion_of_mutation = PORTION_OF_MUTATION
            mutate_worst = True

            for iterNumber in range(ITERS):
                with MeasureTime('one iter', level='info'):
                    self.evaluate()
                    self.population.sort()

                    # Checkpoint every 100 generations: log scores, optionally
                    # display, and save the best image to OUTPUT_DIR/<id>/.
                    if iterNumber % 100 == 0:
                        best = self.best
                        worse = self.worse
                        title = f"#{iterNumber}. best is {best.score}, worse is {worse.score}"
                        logging.warning(title)
                        if show_img:
                            show_image(best.img, title)

                        directory = join(OUTPUT_DIR, self._id)
                        create_dir(directory)
                        image_name = f"{str(iterNumber // 100).zfill(4)}.png"
                        best.img.save(join(directory, image_name))

                        # If the best score moved by more than ~1 point since
                        # the last checkpoint, switch the mutation strategy.
                        # NOTE(review): assumes a larger delta means the search
                        # should explore more randomly — confirm intent.
                        delta = best.score - prev_score
                        prev_score = best.score
                        if delta > 1024 * 0.001:
                            # switch to random mode
                            mutate_worst = False
                            mutation_chance = 1 / 30
                            portion_of_mutation = 5 / 1024
                            logging.warning(f"Delta is {delta}. New chance: {mutation_chance}, share: {portion_of_mutation}, mutate_worst={mutate_worst}")

                    # Produce offspring, cull, and restore the population to
                    # its fixed size before mutating.
                    new_population = self.crossover()
                    self.select()
                    self.population.extend(new_population)
                    self.size = len(self.population)

                    assert self.size == POPULATION_SIZE

                    self.mutation(mutation_chance, portion_of_mutation, mutate_worst)


            self.best.save(OUTPUT_DIR)
        except KeyboardInterrupt as e:
            # Persist the best individual even when interrupted by the user.
            self.best.save(OUTPUT_DIR)
            raise e
def file_path(date, code_url, directory, lang):
    """Return the full path of the HTML file for one press release,
    ensuring the <directory>/<yyyy>/<mm> subdirectory exists."""
    day, month, year = date
    # The helper creates the year/month subdirectory if needed and
    # returns its path.
    target_dir = tools.create_dir(directory, year, month)
    name = '%d-%0.2d-%0.2d_celex_%s.%s.html' % (int(year), int(month),
                                                int(day), code_url, lang)
    return os.path.join(target_dir, name)
def process(content, code_url, directory, lang, date,
            options):  # retrieval and storage of the press releases
    """Extract the body of a press release and store it as an HTML file.

    On extraction failure the (code_url, date) pair is appended to the
    log file named by options.log (if non-empty) and the document is
    skipped.

    NOTE(review): Python 2 code (`print >> f`); the bare `except`
    silently swallows every error raised by cut_content.
    """
    try:
        content_cut = cut_content(content)
    except:
        if options.log != '':
            logline = '%s %s' % (code_url, date)
            f = open(options.log, 'a')
            print >> f, logline
            f.close()
        return
    (dd, mm, yyyy) = date
    filepath = file_path(date, code_url, directory, lang)
    subdirectory = tools.create_dir(directory, yyyy, mm)  # check if create dir
    filename_css = lss.construct_css(content, subdirectory, date, code_url,
                                     lang)
    # Wrap the extracted content in a minimal HTML document that links the
    # generated stylesheet and records the language and CELEX id as metadata.
    tag_document_open = '''<html>
  <head>
  <meta http-equiv="content-type" content="text/html;charset=utf-8"/>
  <link href="%s" type="text/css" rel="stylesheet">
  <meta name="Language" content="%s" />
  <meta name="celex" content="%s" />
  </head>
  <body>
''' % (filename_css, lang, code_url)
    tag_document_close = '''
  </body>
</html>'''
    doc = '%s%s%s' % (tag_document_open, content_cut, tag_document_close)
    f = open(filepath, 'w')
    print >> f, doc
    f.close()
def copy_doc(list_path, options):
    """Copy stored HTML documents (and their stylesheets) into the output tree.

    Returns a dict mapping language code -> list of manifest-relative
    paths of the copied documents.
    """
    dic_newpath = {}
    for path in list_path:
        dirs, filename = os.path.split(path)
        # Source layout is .../<directory>/<yyyy>/<mm>/<file>.
        directory, yyyy, mm = dirs.split(os.sep)[-3:]
        # NOTE(review): this reads the global `opt`, not the `options`
        # parameter — presumably a script-level option object; confirm.
        subdirectory = tools.create_dir(opt.output_dir, yyyy,
                                        mm)  # check if create dir
        rootsubdir = os.sep.join(subdirectory.split(os.sep)[-3:])
        newpath = os.path.join(subdirectory, filename)
        newpathmanifest = os.path.join(rootsubdir, filename)

        lg = path2lg(path)
        if lg not in dic_newpath:
            dic_newpath[lg] = []
        dic_newpath[lg].append(newpathmanifest)

        # The matching stylesheet lives next to the document.
        filename_css = '.'.join(filename.split('.')[:-1]) + '.css'
        newpath_css = os.path.join(subdirectory, filename_css)
        path_css = os.path.join(dirs, filename_css)
        if options.clean:
            # Cleaning rewrites the HTML in place; the CSS is not copied.
            clean_html(path, newpath)
        else:
            shutil.copyfile(path, newpath)
            shutil.copyfile(path_css, newpath_css)
    return dic_newpath
def process(content, code_url, directory, lang, date, options) :   # retrieval and storage of the press releases
  """Extract the body of a press release and store it as an HTML file.

  On extraction failure the (code_url, date) pair is appended to the log
  file named by options.log (if non-empty) and the document is skipped.
  NOTE(review): Python 2 code; the bare `except` hides all errors.
  """
  try :
    content_cut = cut_content(content)
  except :
    if options.log != '' :
      logline = '%s %s'%(code_url, date)
      f = open(options.log, 'a')
      print >>f, logline
      f.close()
    return
  (dd,mm,yyyy)= date
  filepath = file_path(date, code_url, directory, lang)
  subdirectory = tools.create_dir(directory, yyyy, mm) # check if create dir
  filename_css = lss.construct_css(content, subdirectory, date, code_url, lang)
  # Minimal HTML wrapper linking the generated stylesheet and recording
  # the language and CELEX identifier as metadata.
  tag_document_open = '''<html>
  <head>
  <meta http-equiv="content-type" content="text/html;charset=utf-8"/>
  <link href="%s" type="text/css" rel="stylesheet">
  <meta name="Language" content="%s" />
  <meta name="celex" content="%s" />
  </head>
  <body>
'''%(filename_css,lang, code_url)
  tag_document_close = '''
  </body>
</html>'''
  doc = '%s%s%s'%(tag_document_open,content_cut,tag_document_close)
  f = open(filepath,'w')
  print >>f, doc
  f.close()
Exemplo n.º 9
0
def maps_path(cfg):
    """Return the directory for map plots, creating it if needed.

    Parameters
    ----------
    cfg : config-object
        Object holding all user-configuration parameters as attributes

    Returns
    -------
    str
        Full path of <chain_root>/check_output/maps
    """
    folder = os.path.join(cfg.chain_root, 'check_output', 'maps')
    tools.create_dir(folder, 'check_output maps')
    return folder
Exemplo n.º 10
0
def timeseries_path(cfg):
    """ Returns the path (and creates it, if necessary) to the timeseries
    plots.

    Parameters
    ----------
    cfg : config-object
        Object holding all user-configuration parameters as attributes

    Returns
    -------
    output_path : str
        Full file path to timeseries directory
    """
    # The same path used to be joined twice into two variables; build it
    # once, create it, and return it.
    output_path = os.path.join(cfg.output_root, 'check_output', 'timeseries')
    tools.create_dir(output_path, 'timeseries')

    return output_path
def do_convergence_test(trajectory, traj_type):
    """Run pyProCT clusterings on growing prefixes of a trajectory.

    For n = 1000..9000 frames, extracts the first n frames into a
    temporary PDB, writes a pyProCT script for it and executes pyProCT,
    storing results under convergence/<traj_type>/<traj_id>/<n>/.

    NOTE(review): Python 2 (`print` statement); shells out through
    os.system with interpolated paths — safe only for trusted inputs.
    """
    path, file_name = os.path.split(trajectory["path"])
    traj_id = file_name.split(".")[0]
    base_path = os.path.join("convergence",traj_type,"%s"%traj_id)

    for n in range(1000,10000,1000):
        print "- Working with %s with %d frames"%(trajectory["path"],n)
        this_path = os.path.join(base_path, "%d"%n)
        create_dir(this_path)
        pdb_path = os.path.join(this_path,"%d.pdb"%n)
        extract_first_n_frames(n, trajectory["path"], pdb_path)
        # Start from the shared template and point it at this prefix.
        script = copy.deepcopy(script_template)
        script["global"]["workspace"]["base"] = this_path
        script["data"]["files"] = [pdb_path]
#         script["clustering"]["evaluation"]["maximum_noise"] = trajectory["noise"]
#         script["clustering"]["evaluation"]["minimum_cluster_size"] = int(n/trajectory["max"])
#         script["clustering"]["evaluation"]["minimum_clusters"] = trajectory["min"]
#         script["clustering"]["evaluation"]["maximum_clusters"] = trajectory["max"]
        script_path = os.path.join(this_path,"script.json")
        save_dic_in_json(script, script_path)
        os.system("python %s %s "%(PYPROCT, script_path))
        # The extracted PDB is only needed for the run; remove it afterwards.
        os.system("rm %s"%pdb_path)
Exemplo n.º 12
0
def create_map_directories(cfg, data, units):
    """Create folders for the 2D maps of variables.

    Parameters
    ----------	
    cfg : config-object
        Object holding all user-configuration parameters as attributes
    data: pandas.DataFrame
        Dataframe containing diagnostic values for each variable
    units : dict
        Dictionary of units per variable name

    NOTE(review): `data` and `units` are not used in the body; the
    variable list is re-read from the pickled dataframe — confirm intent.
    """
    output_path = maps_path(cfg)
    # No pid argument -> path of the main (complete) data pickle.
    data_path = pkl_path(cfg.output_root)
    df = pd.read_pickle(data_path)

    # Get variable names
    varnames = get_variable_names(df.columns.values)
    # Create directories per variable for map plots
    for varname in varnames:
        output_folder = os.path.join(output_path, varname)
        tools.create_dir(output_folder, varname)
Exemplo n.º 13
0
def main():
    """Entry point for the discovery master with STOP/UPDATE file control.

    Removes a stale UPDATE marker, ensures the rsync service is running,
    starts the logger, IRC bot, uploader and service runner as daemon
    threads, then idles until a STOP file appears (leaving an UPDATE
    marker behind) or ``settings.running`` is cleared.
    """
    if os.path.isfile('UPDATE'):
        os.remove('UPDATE')
    # rsync is required for uploads; try to start it if it is down.
    if os.system('service rsync status') != 0:
        print('rsync not running; attempting to start')
        try:
            os.system('service rsync start')
        except OSError:
            print('failed to start rsync service')
            os.system('service rsync status')
    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log('Starting NewsGrabber')

    tools.create_dir(settings.dir_assigned_services)

    if not os.path.isfile(settings.target):
        settings.logger.log("Please add one or more discovery rsync targets to file '{target}'".format(target=settings.target), 'ERROR')

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    # Give the IRC connection time to settle before starting workers.
    time.sleep(30)
    settings.upload = discoveryservice.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.run_services = discoveryservice.RunServices()
    settings.run_services.daemon = True
    settings.run_services.start()

    # Idle until a STOP file appears or settings.running is cleared.
    while settings.running:
        if os.path.isfile('STOP'):
            os.remove('STOP')
            open('UPDATE', 'w').close()
            break
        time.sleep(1)
Exemplo n.º 14
0
def main():
    """Entry point for a grabber node.

    Starts the logger, IRC bot, uploader and grab worker as daemon
    threads, then idles until ``settings.running`` is cleared.

    Raises
    ------
    Exception
        If the rsync target file named by settings.target_main is missing.
    """
    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log(
        'Starting grabber {name}'.format(name=settings.irc_nick))

    tools.create_dir(settings.dir_ready)
    tools.create_dir(settings.dir_new_lists)
    tools.create_dir(settings.dir_old_lists)

    if not os.path.isfile(settings.target_main):
        # BUG FIX: the old `.format(**locals())` raised KeyError('settings')
        # because `settings` is a module, not a local variable; pass the
        # value explicitly instead.
        raise Exception(
            "Please add a rsync target to file '{target_main}'.".format(
                target_main=settings.target_main))

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    # Give the IRC connection time to settle before starting workers.
    time.sleep(30)
    settings.upload = upload.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.grab = grab.Grab()
    settings.grab.daemon = True
    settings.grab.start()

    # Idle loop; a disabled per-thread watchdog used to live here.
    while settings.running:
        time.sleep(1)
Exemplo n.º 15
0
def main():
    """Entry point for a grabber node (variant without the startup delay).

    Starts the logger, IRC bot, uploader and grab worker as daemon
    threads, then idles until ``settings.running`` is cleared.

    Raises an Exception if the rsync target file is missing.
    """
    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log('Starting grabber {name}'.format(
            name=settings.irc_nick))

    tools.create_dir(settings.dir_ready)
    tools.create_dir(settings.dir_new_lists)
    tools.create_dir(settings.dir_old_lists)

    if not os.path.isfile(settings.target_main):
        raise Exception("Please add a rsync target to file '{name}'.".format(
            name=settings.target_main))

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    settings.upload = upload.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.grab = grab.Grab()
    settings.grab.daemon = True
    settings.grab.start()

    # Idle loop; the disabled code below was a per-thread watchdog.
    while settings.running:
    #    if not settings.logger.isAlive():
    #        print('The logger stopped running...')
    #        settings.irc_bot.send('PRIVMSG', 'The logger stopped running...',
    #                settings.irc_channel_bot)
    #        settings.running = False
    #    if not settings.irc_bot.isAlive():
    #        print('The IRC bot stopped running...')
    #        settings.running = False
    #    if not settings.upload.isAlive():
    #        print('The uploader stopped running...')
    #        settings.irc_bot.send('PRIVMSG', 'The uploader stopped running...',
    #                settings.irc_channel_bot)
    #        settings.running = False
    #    if not settings.grab.isAlive():
    #        print('The grabber stopped running...')
    #        settings.irc_bot.send('PRIVMSG', 'The grabber stopped working...',
    #                settings.irc_channel_bot)
    #        settings.running = False
        time.sleep(1)
def copy_doc(list_path,options) :
  """Copy stored HTML documents (and their stylesheets) into the output tree.

  Returns a dict mapping language code -> list of manifest-relative paths
  of the copied documents.
  NOTE(review): reads the global `opt`, not the `options` parameter —
  presumably a script-level option object; confirm.
  """
  dic_newpath = {}
  for path in list_path :
    dirs,filename = os.path.split(path)
    # Source layout is .../<directory>/<yyyy>/<mm>/<file>.
    directory,yyyy,mm = dirs.split(os.sep)[-3:]
    subdirectory = tools.create_dir(opt.output_dir,yyyy,mm) # check if create dir
    rootsubdir = os.sep.join(subdirectory.split(os.sep)[-3:])
    newpath = os.path.join(subdirectory,filename)  
    newpathmanifest = os.path.join(rootsubdir,filename)

    lg = path2lg(path)
    if lg not in dic_newpath :
      dic_newpath[lg] = []
    dic_newpath[lg].append(newpathmanifest)

    # The matching stylesheet lives next to the document.
    filename_css = '.'.join(filename.split('.')[:-1])+'.css'
    newpath_css = os.path.join(subdirectory,filename_css)
    path_css = os.path.join(dirs,filename_css)
    if options.clean :
      # Cleaning rewrites the HTML in place; the CSS is not copied.
      clean_html(path, newpath)
    else :
      shutil.copyfile(path, newpath)
      shutil.copyfile(path_css, newpath_css)  
  return dic_newpath  
Exemplo n.º 17
0
def main():
    """Entry point for a grabber node with STOP/UPDATE file control.

    Removes a stale UPDATE marker, starts the logger, IRC bot, uploader
    and grab worker as daemon threads, then idles until a STOP file
    appears (leaving an UPDATE marker behind) or ``settings.running``
    is cleared.
    """
    if os.path.isfile('UPDATE'):
        os.remove('UPDATE')

    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log(
        'Starting grabber {name}'.format(name=settings.irc_nick))

    tools.create_dir(settings.dir_ready)
    tools.create_dir(settings.dir_new_lists)
    tools.create_dir(settings.dir_old_lists)

    if not os.path.isfile(settings.target_main):
        raise Exception(
            "Please add a rsync target to file '{target_main}'.".format(
                target_main=settings.target_main))

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    # Give the IRC connection time to settle before starting workers.
    time.sleep(30)
    settings.upload = upload.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.grab = grab.Grab()
    settings.grab.daemon = True
    settings.grab.start()

    # Idle until a STOP file appears or settings.running is cleared.
    while settings.running:
        if os.path.isfile('STOP'):
            os.remove('STOP')
            open('UPDATE', 'w').close()
            break
        time.sleep(1)
#         working_dir = os.path.join("comparisons","profasi","%svs%s"%(A_traj_id,B_traj_id))
#         create_dir(working_dir)
#         script["global"]["workspace"]["base"] = working_dir
#         script["data"]["files"] = [A_traj, B_traj]
#         script_path = os.path.join(working_dir,"script.json")
#         save_dic_in_json(script, script_path)
#         os.system("python %s %s "%(PYPROCT, script_path))
#         initial_j_offset = 0
#     initial_i_offset = 0

# Campari vs Profasi
# Build and execute a pyProCT comparison run between each selected
# Campari trajectory and its Profasi counterpart (currently pinned to
# index 15 only).
for i in [15]: #range(0,len(profasi_trajs)):
    A_traj = campari_trajs[i]
    path, file = os.path.split(A_traj)
    A_traj_id = file.split(".")[0]

    B_traj = profasi_trajs[i]
    path, file = os.path.split(B_traj)
    B_traj_id = file.split(".")[0]

    # Fresh copy of the template script, pointed at a dedicated working
    # directory for this trajectory pair.
    script = copy.deepcopy(script_template)
    working_dir = os.path.join("comparisons","campari_vs_profasi","%svs%s"%(A_traj_id,B_traj_id))
    create_dir(working_dir)
    script["global"]["workspace"]["base"] = working_dir
    # Restrict both trajectories to the shared residue range 3-53.
    script["data"]["files"] = [{"file":A_traj,"base_selection":"resnum 3to53"},{"file": B_traj,"base_selection":"resnum 3to53"}]
    script_path = os.path.join(working_dir,"script.json")
    save_dic_in_json(script, script_path)
    # NOTE(review): os.system with interpolated paths — trusted input only.
    os.system("python %s %s "%(PYPROCT, script_path))


import os
import numpy
from sklearn.metrics import mean_squared_error
from math import sqrt
import tools
from pyRMSD.condensedMatrix import CondensedMatrix
from pyproct.driver.handlers.matrix.matrixHandler import MatrixHandler
import copy
from pyproct.clustering.clustering import Clustering
import sklearn.metrics
import math

# Precomputed RDC fit results for both force fields.
# NOTE(review): assumed to be one entry per model — confirm layout.
campari_RDCs = numpy.load(os.path.join("RDCvsRMSD","q_fit_campari.npy"))
profasi_RDCs = numpy.load(os.path.join("RDCvsRMSD","q_fit_profasi.npy"))

# Output directories for the RMSD- and RDC-based matrices.
tools.create_dir(os.path.join("RDCvsRMSD","campari","RMSD"))
tools.create_dir(os.path.join("RDCvsRMSD","campari","RDC"))

# Path to the pyProCT entry point used by os.system calls below.
PYPROCT = "/home/victor/workspaces/Python/pyProClust/pyproct/main.py"

# number_of_models = len(campari_RDCs)
# ## Create RDC matrix
# matrix_data = []
# for i in range(0, number_of_models-1):
#     for j in range(i+1, number_of_models):
#         matrix_data.append(sqrt(mean_squared_error(campari_RDCs[i], campari_RDCs[j])))
#
# handler = MatrixHandler( { "method": "load" })
# handler.distance_matrix = CondensedMatrix(matrix_data)
# handler.save_matrix(os.path.join("RDCvsRMSD", "campari", "RDC", "matrix"))
            "lig_OAE": "name OAE",  # Unique name in the ligand
            "ile121": "backbone resnum 121",
            "phe282": "backbone resnum 282",
        },
    }
]

cwd = os.getcwd()
for datum in data:
    prot_name = datum["dir"]
    print "========================\nWorking with %s\n========================" % (prot_name)
    # Look for the directory and enter it
    base_dir = os.path.join(cwd, prot_name)
    os.chdir(base_dir)
    # Create dirs to put results
    create_dir("plots")
    create_dir("selections")

    # Generate all selections
    pdb = prody.parsePDB("%s" % datum["pdb_traj"])
    selections = {}
    for selection in datum["selection"]:
        print selection
        selections[selection] = pdb.select(datum["selection"][selection])
        prody.writePDB(os.path.join("selections", selection), selections[selection])

    #############################
    # Motif VS Helix Distance
    #############################
    calculator = RMSDCalculator(
        calculatorType="QCP_OMP_CALCULATOR",
Exemplo n.º 21
0
def main():
    """Entry point for the NewsGrabber master with STOP/UPDATE control.

    Starts the logger, IRC bot, uploader and service runner as daemon
    threads, then idles until a STOP file appears (leaving an UPDATE
    marker behind) or ``settings.running`` is cleared.
    """
    if os.path.isfile('UPDATE'):
        os.remove('UPDATE')


# NOTE(review): settings.init() is commented out here — settings must be
# initialised elsewhere before main() runs; confirm.
#    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log('Starting NewsGrabber')

    # Make sure every working directory exists before any thread uses it.
    tools.create_dir(settings.dir_new_urllists)
    tools.create_dir(settings.dir_old_urllists)
    tools.create_dir(settings.dir_donefiles)
    tools.create_dir(settings.dir_ready)
    tools.create_dir(settings.dir_last_upload)
    tools.create_dir(settings.dir_dumped_url_data)

    # Missing configuration is logged as an error but does not abort startup.
    if not os.path.isfile(settings.targets):
        settings.logger.log("Please add one or more rsync targets to file " \
                            "'{targets}'".format(targets=settings.targets),
                            'ERROR')
    if not os.path.isfile(settings.keys):
        settings.logger.log("Please add you keys by running 'add_keys.py'.",
                            'ERROR')

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    # Give the IRC connection time to settle before starting workers.
    time.sleep(30)
    settings.upload = upload.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.run_services = service.RunServices()
    settings.run_services.daemon = True
    settings.run_services.start()

    # Idle until a STOP file appears or settings.running is cleared.
    while settings.running:
        if os.path.isfile('STOP'):
            os.remove('STOP')
            open('UPDATE', 'w').close()
            break
        time.sleep(1)
Exemplo n.º 22
0
    # Changed to optparse
    
    parser = OptionParser()
    parser.add_option("--drug", dest="drug")
    parser.add_option("--protein", dest="protein")
    parser.add_option("--results", dest="results")
    parser.add_option("--template", dest="template")
    parser.add_option("--allosteric", action = "store_true", default= False, dest="do_allosteric")
    parser.add_option("--plot", action = "store_true", default= False, dest="do_plots")
    (options, args) = parser.parse_args()

    if options.drug is None or options.protein is None or options.template is None or options.results is None:  
        parser.error('Base directory, protein name, results directory and template are mandatory arguments.')

    RESULTS_PATH = os.path.join(options.results, options.drug, options.protein)
    create_dir(RESULTS_PATH)

    #--------------------------------
    # Perform the filtering
    #--------------------------------
    FILTERED_PDB_FILE = os.path.join(RESULTS_PATH,"%s.filtered.pdb"%(options.protein))
    METRICS_FILE = os.path.join(RESULTS_PATH,"%s.metrics.dat"%(options.protein))
    records = processDir(options.drug, options.protein)
    if not options.do_allosteric:
        selection = filterRecords("'L1  Binding Ene' < -226 and 'L1  Binding Ene' > -424 and 'L1(15.360.555.4)' < 6.5 and 'L1(15.360.555.4)' > 1.5", records)
        genSingleTrajFast(FILTERED_PDB_FILE, records, selection)
        genMetricsFile(METRICS_FILE, ["L1(15.360.555.4)","L1  Binding Ene"], selection)
        metrics = genMetrics(["L1(15.360.555.4)","L1  Binding Ene"], selection).T
    else: # range 6A - 14A
        selection = filterRecords("'L1  Binding Ene' < -226 and 'L1  Binding Ene' > -424 and 'L1(24.954.352.7)' < 14.1 and 'L1(24.954.352.7)' > 5.9", records)
        genSingleTrajFast(FILTERED_PDB_FILE, records, selection)
Exemplo n.º 23
0
 def save(self, output_dir):
     """Pickle this object into *output_dir* under a timestamped name."""
     create_dir(output_dir)
     # e.g. genetic_07_13_45_12.pickle (day_hour_minute_second).
     file_name = f"genetic_{datetime.datetime.now().strftime('%d_%H_%M_%S')}.pickle"
     output_file = os.path.join(output_dir, file_name)
     with open(output_file, 'wb') as file:
         pickle.dump(self, file)
Exemplo n.º 24
0
import os
from tools import create_dir
from data_io import get_texture_iter

create_dir('samples')               # create, if necessary, for the output samples
create_dir('models')                # likewise for stored model files


def zx_to_npx(zx, depth):
    """Return the output image size produced by a stack of `depth`
    'same'-padded convolutional layers from a spatial input field of
    size `zx`.
    """
    # Each of the `depth` layers doubles the (zx - 1) span, plus the one
    # anchor pixel.  Note: in theano the result would be zx * 2**depth.
    upscale = 2 ** depth
    return upscale * (zx - 1) + 1


class Config(object):
    '''
    wraps all configuration parameters in 'static' variables

    Values are read directly as class attributes; no instance is needed.
    '''
    ##
    # network parameters
    nz          = 1                  # num of dim for Z at each field 
    zx          = 12                    # number of spatial dimensions in Z
    zx_sample   = 20                    # size of the spatial dimension in Z for producing the samples
	# l = h/r, m = w/r
    nc          = 1                     # number of channels in input X
    gen_ks      = ([(5,5)] * 5)[::-1]   # kernel sizes on each layer - should be odd numbers for zero-padding stuff       
    dis_ks      = [(9,9)] * 5           # kernel sizes on each layer - should be odd numbers for zero-padding stuff
    gen_ls      = len(gen_ks)           # num of layers in the generative network
Exemplo n.º 25
0
def main():
    """Entry point: start the NewsGrabber master process.

    Starts the logger, IRC bot, uploader and service runner as daemon
    threads, then idles until ``settings.running`` is cleared.
    """
    settings.init()
    settings.logger = log.Log(settings.log_file_name)
    settings.logger.daemon = True
    settings.logger.start()
    settings.logger.log('Starting NewsGrabber')

    # Make sure every working directory exists before any thread uses it.
    tools.create_dir(settings.dir_new_urllists)
    tools.create_dir(settings.dir_old_urllists)
    tools.create_dir(settings.dir_donefiles)
    tools.create_dir(settings.dir_ready)
    tools.create_dir(settings.dir_last_upload)
    tools.create_dir(settings.dir_dumped_url_data)

    if not os.path.isfile(settings.targets):
        # BUG FIX: the old `.format(**locals())` raised KeyError('settings')
        # because `settings` is a module, not a local variable; pass the
        # value explicitly instead.
        settings.logger.log(
            "Please add one or more rsync targets to file "
            "'{targets}'".format(targets=settings.targets), 'ERROR')

    settings.irc_bot = irc.IRC()
    settings.irc_bot.daemon = True
    settings.irc_bot.start()
    # Give the IRC connection time to settle before starting workers.
    time.sleep(30)
    settings.upload = upload.Upload()
    settings.upload.daemon = True
    settings.upload.start()
    settings.run_services = service.RunServices()
    settings.run_services.daemon = True
    settings.run_services.start()

    while settings.running:
        time.sleep(1)
def file_path(date,code_url,directory,lang) :
  """Build the output HTML file path for one press release.

  `date` is a (dd, mm, yyyy) tuple; the file is placed under
  <directory>/<yyyy>/<mm>/ (created if needed by tools.create_dir).
  """
  (dd,mm,yyyy)= date
  subdirectory = tools.create_dir(directory,yyyy,mm) # check if create dir
  filename = '%d-%0.2d-%0.2d_celex_%s.%s.html'%(int(yyyy), int(mm), int(dd), code_url,lang)
  filepath = os.path.join(subdirectory,filename)
  return filepath
        parser.error('You must specify the frames_per_drug specification file.')

if __name__ == '__main__':
    sns.set_style("whitegrid")
    
    parser = OptionParser()
    parser.add_option("--contacts", dest="contacts")
    parser.add_option("--motifs", dest="motifs")
    parser.add_option("--results", dest="results")
    parser.add_option("--drug-atoms", dest="drug_atoms")
    parser.add_option("--frames-per-drug", dest="frames_per_drug")
    
    (options, args) = parser.parse_args()
    check_options(options)
    
    tools.create_dir(options.results)
    
    THRESHOLD = 1000
    
    # Parse all files
    num_atoms_per_drug = parse_drug_info(options.drug_atoms)
    frames_per_prot_drug = parse_frames_info(options.frames_per_drug)
    data, proteins, drugs = parse_contacts_file(options.contacts)
    motifs = parse_motifs(options.motifs)
    
    # Override order for stuff
    ordered_motifs = ['F/I-II', 'A/IV', 'B/V', 'C/VI', 'D', 'E/VII', 'Priming Loop']
    drugs.sort() 
    proteins = ["JEV", "WNV", "TBEV", "BVDV", "HCV", "Polio"]
    
    # PLOT TOTAL NUMBER OF CONTACTS PER PROTEIN