Example #1
 def setUp(self):
     create_database("tests.db", "y")
     self.dbi = DBInterface("tests.db")
     self.maillists = [{
         "id": 1,
         "name": "Hack Bulgaria"
     }, {
         "id": 2,
         "name": "HackFMI"
     }]
     self.subscribers = [{
         "subscriber_id": 1,
         "name": "RadoRado",
         "email": "*****@*****.**"
     }, {
         "subscriber_id": 2,
         "name": "IvoIvo",
         "email": "*****@*****.**"
     }]
     self.maillists_to_subscribers = [{
         "maillist_id": 1,
         "subscriber_id": 1
     }, {
         "maillist_id": 1,
         "subscriber_id": 2
     }, {
         "maillist_id": 2,
         "subscriber_id": 2
     }, {
         "maillist_id": 2,
         "subscriber_id": 3
     }, {
         "maillist_id": 2,
         "subscriber_id": 4
     }]
Example #2
 def newDatabase(self, dbfile):
     """ Create new database """
     # Create new database
     self.album = Album()
     create_database(dbfile)
     self.setDatabaseFile(dbfile)
     self.setFields(self.album.fields)
     return True, ''
Example #3
def create_database_fixture(db_engine, setup='simple_one_user'):
    assert db_engine is not None
    create_database(db_engine=db_engine)

    # Insert mock data
    if setup == 'simple_one_user':
        user = models.User(
            id='123',
            username='******',
            first_name='Gary')
        session = sessionmaker(bind=db_engine)()
        session.add(user)
        session.commit()
        session.close()
Example #4
 def __init__(self, dbfile=None, parent=None):
     super(PhotoDatabase, self).__init__(parent)
     self._dbfile = None
     if dbfile and os.path.exists(dbfile):
         # Open an existing database
         # Note: look into combining album field initialization with ours
         st, album = self.load(dbfile)
         if not st:
             if album:
                 # There was an error
                 warning_box(album)  # Probably move this to the caller
             self.album = Album()
         else:
             self.album = album
             self.setDatabaseFile(dbfile)
             self.setFields(album.fields)
     else:
         # Create new database
         self.album = Album()
         if dbfile:
             create_database(dbfile)
             self.setDatabaseFile(dbfile)
             self.setFields(self.album.fields)
Example #5
    # Handle the different possible commands
    if args.command == "medias":
        add_medias(args.add, args.caption, action_scheduler)
    # Find comments
    elif args.command == "find-comments":
        find_medias(config, args.model, action_scheduler, 'comment',
                    args.threshold)
    # Find likes
    elif args.command == "find-likes":
        find_medias(config, args.model, action_scheduler, 'like',
                    args.threshold)
    elif args.command == "tools":
        # Create database
        if args.create_database:
            create_database(config)
        elif args.create_config:
            create_config(args.config)
        # end if
    # Executor
    elif args.command == "execute":
        execute_actions(config, action_scheduler)
    # List friends
    elif args.command == "friends":
        # Update friends
        if args.update:
            friends_manager.update()
        # end if
    else:
        sys.stderr.write(pystr.ERROR_UNKNOWN_COMMAND.format(args.command))
    # end if
Example #6
import os

from sqlalchemy import and_, or_
from sqlalchemy.orm import Session as SQLSession

from models.database import DATABASE_NAME, Session
import create_database as db_creator
from models.lesson import Lesson, association_table
from models.student import Student
from models.group import Group

if __name__ == '__main__':
    db_is_created = os.path.exists(DATABASE_NAME)
    if not db_is_created:
        db_creator.create_database()

    session: SQLSession = Session()
    print(f'Number of students: {session.query(Student).count()}')
    student = session.query(Student).filter(
        and_(Student.surname.like('Д%'), Student.age > 18)).one()
    print(student)
    print('*' * 30)
    students = session.query(Student).filter(
        and_(Student.surname.like('А%'), Student.age > 16))
    for it in students:
        print(it)
    print('*' * 30)
    students_list: list[Student] = [it for it in students]
    print(students_list)
    print('*' * 30)
    # NOTE: the original example is truncated here; one plausible continuation
    # using the imported or_ helper:
    for it in session.query(Student).filter(
            or_(Student.surname.like('Д%'), Student.surname.like('А%'))):
        print(it)
Example #7
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_database import KeyWord, Base, Content, db_file_name, create_database

# check whether the db file already exists
try:
    with open(db_file_name, 'r'):
        print(db_file_name + ' already exists')
except FileNotFoundError:
    create_database()

engine = create_engine('sqlite:///' + db_file_name +
                       '?check_same_thread=False')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine

DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
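
# A small illustration of the comment above (an added sketch, not from the
# original source): changes are only staged until session.commit(), and
# session.rollback() discards them. Content's keyword arguments match the
# create_content helper below.
demo = Content(title='demo title', url='http://example.com', keyword='demo')
session.add(demo)     # staged in the session only, not yet written
session.rollback()    # discards the staged row; nothing was persisted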


def create_content(title, url, keyword):

    content = Content(title=title, url=url, keyword=keyword)
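    # The original example is truncated here; a typical completion (an
    # assumption, not from the source) would stage and persist the new row:
    session.add(content)
    session.commit()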
Example #8
 def setUp(self):
     create_database("test_database.db")
     self.test_db = Database("test_database.db")
Example #9
        idx = 0
        with open(self.input_path) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                idx += 1
                if 1 <= idx <= self.number_of_entries:
                    instance = AccidentInstance(
                        row["ID"], row["Severity"], row["Start_Time"],
                        row["Street"], row["Side"], row["City"], row["County"],
                        row["State"], row["Temperature(F)"],
                        row["Visibility(mi)"], row["Weather_Condition"])
                    self.accidents.append(instance)
                else:
                    break

    def write_data(self):
        with open(self.output_path, mode='w', newline='') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(self.header)
            writer.writerows(list(accident) for accident in self.accidents)


file_path = r"..\data\complete\US_Accidents_Dec20.csv"
mock_data_path = r"..\data\mock\mock_data.csv"
mockData = MockData(file_path, mock_data_path, 100)
mockData.get_data()
mockData.write_data()

connection = create_database.create_database()
create_database.write_db(connection=connection, accidents=mockData.accidents)
Example #10
def example_db(
    file_name,
    test_config={
        'node_effects': False,
        'group_effects': False,
        'sex_effect': False,
        'use_group_mulcov': False,
        'include_group_data': False,
        'zero_sum_mulcov': False
    },
    truth={},
    prior=dict(subgroup_effects=None,
               parent_density='uniform',
               parent_std=None,
               child_density='uniform',
               child_std=None,
               subgroup_density='uniform',
               subgroup_std=1),
    node_effects=None,
    subgroup_effects=None,
    tol_fixed=1e-10,
    tol_random=1e-10,
):
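    # NOTE: the dict defaults in the signature above are mutable default
    # arguments shared across calls; safe here only because they are never mutated.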

    if os.path.exists(file_name):
        os.remove(file_name)

    # Note that the a, t values are not used for this example

    def fun_iota_parent(a, t):
        return ('prior_iota_parent', None, None)

    if test_config['node_effects']:

        def fun_iota_child(a, t):
            return ('prior_iota_child', None, None)

    if test_config['group_effects']:

        def fun_iota_group(a, t):
            return ('prior_iota_group', None, None)

        def fun_iota_subgroup(a, t):
            return ('prior_iota_subgroup', None, None)

    if test_config['sex_effect']:

        def fun_iota_sex(a, t):
            return ('prior_iota_sex', None, None)

    # TODO: Delete dependency with dismod_at
    # ----------------------------------------------------------------------
    # age table
    age_list = [0.0, 100.0]
    #
    # time table
    time_list = [1990.0, 2020.0]
    #
    # integrand table
    integrand_table = [{'name': 'Sincidence'}]
    #
    # node table: p1 -> (c1, c2)
    if test_config['node_effects']:
        node_table = [
            {'name': 'p1', 'parent': ''},
            {'name': 'c1', 'parent': 'p1'},
            {'name': 'c2', 'parent': 'p1'},
        ]
    else:
        node_table = [
            {'name': 'p1', 'parent': ''},
        ]

    #
    # weight table:
    weight_table = list()
    #
    # covariate table
    covariate_table = [{
        'name': 'one',
        'reference': 0.0,
        'max_difference': None
    }]
    if test_config['sex_effect']:
        covariate_table.append({
            'name': 'sex',
            'reference': 0.0,
            'max_difference': None
        })
    #
    # mulcov table
    mulcov_table = []
    if test_config['group_effects']:
        mulcov_table.append({
            'covariate': 'one',
            'type': 'rate_value',
            'effected': 'iota',
            'group': 'g1',
            'smooth': 'smooth_iota_group' if test_config['use_group_mulcov'] else None,
            # 'smooth': None,
            'subsmooth': 'smooth_iota_subgroup'
        })
    if test_config['sex_effect']:
        mulcov_table.append({
            'covariate': 'sex',
            'type': 'rate_value',
            'effected': 'iota',
            'group': 'g1' if test_config['group_effects'] else 'none',
            'smooth': 'smooth_iota_sex'
        })
    #
    # avgint table:
    avgint_table = list()
    #
    # nslist_table:
    nslist_table = dict()
    # ----------------------------------------------------------------------
    # subgroup_table
    subgroup_table = [
        {'subgroup': 'none', 'group': 'none'},
        {'subgroup': 's1', 'group': 'g1'},
        {'subgroup': 's2', 'group': 'g1'},
    ]
    # ----------------------------------------------------------------------
    # data table:
    data_table = list()
    # write out data
    row = {
        'density': 'gaussian',
        'weight': '',
        'hold_out': False,
        'time_lower': 2000.0,
        'time_upper': 2000.0,
        'age_lower': 50.0,
        'age_upper': 50.0,
        'integrand': 'Sincidence',
        'one': 1,
        # 'node':         'north_america',
        'node': 'p1',
        # 'eta':          1e-4,
    }
    sexs = [0, 1] if test_config['sex_effect'] else [0]
    for node, node_effect in node_effects.items():
        if (not test_config['node_effects'] and node != 'p1'): continue
        # Exclude data for the parent node
        if (test_config['node_effects'] and node == 'p1'): continue
        for sex in sexs:
            for sg, sge in subgroup_effects.items():
                if (not test_config['group_effects'] and sg != 'none'):
                    continue
                # Exclude data for the group -- if fitting both nodes and groups, omitting sg none creates Hessian errors
                if (test_config['group_effects']
                        and not test_config['include_group_data']
                        and sg == 'none'):
                    continue
                total_effect = 0
                if test_config['sex_effect']:
                    use_sex_covariate = (sg != 'none') or (
                        sg == 'none' and not test_config['group_effects'])
                    row['sex'] = sex if use_sex_covariate else -1
                    subgroups = pd.DataFrame(subgroup_table)
                    group = subgroups.loc[
                        subgroups.subgroup == sg, 'group'].squeeze(
                        ) if test_config['group_effects'] else 'g1'
                    sex_effect = sex * truth['iota_sex_true'][group]
                    total_effect += sex_effect
                if test_config['node_effects']:
                    row['node'] = node
                    total_effect += node_effect
                row['subgroup'] = sg
                sg_effect = 0
                if test_config['group_effects']:
                    if sg in ('s1', 's2'):
                        sg_effect = truth['iota_group_true'] + sge
                total_effect += sg_effect
                # print ({'sex_effect': (sex, sex_effect), 'node_effect': (node, node_effect), 'sg_effect': (sg, sg_effect), 'total_effect': total_effect})
                row['meas_value'] = truth['iota_parent_true'] * np.exp(
                    total_effect)
                row['meas_std'] = row['meas_value'] * 1e-1
                data_table.append(copy.copy(row))
    # ----------------------------------------------------------------------
    # prior_table
    prior_table = [
        { # prior_iota_parent
            'name':     'prior_iota_parent',
            'density':  prior.get('parent_density', 'uniform'),
            'mean':     prior.get('parent_mean', truth['iota_parent_true'] * .5),
            'std':      prior.get('parent_std', 0),
            'eta':      prior.get('parent_eta', None),
            'lower':    truth['iota_parent_true'] * 1e-2,
            'upper':    truth['iota_parent_true'] * 1e+2,
        },{ # prior_iota_child
            'name':     'prior_iota_child',
            'density':  prior.get('child_density', 'uniform'),
            'mean':     prior.get('child_mean', .001),
            'std':      prior.get('child_std', 0),
            'eta':      prior.get('child_eta', None),
            'lower':    -np.inf,
            'upper':    +np.inf,
        },
        { # prior_iota_group
            'name': 'prior_iota_group',
            'density': prior.get('group_density', 'uniform'),
            'mean':    prior.get('group_mean', 0.0),
            'std': prior.get('group_std', 0),
            # 'density': 'gaussian',
            # 'mean': 0.0,
            # 'std': 10.0,
        },
        { # prior_iota_subgroup
            'name': 'prior_iota_subgroup',
            'density': prior.get('subgroup_density', 'uniform'),
            'mean':    prior.get('subgroup_mean', 0.0),
            'std':     prior.get('subgroup_std', 0),
        }
    ]
    if test_config['sex_effect']:
        prior_table.append({  # prior_iota_sex
            'name': 'prior_iota_sex',
            'density': 'uniform',
            'mean': 0.0,
            'lower': -100,
            'upper': 100
        })
    # ----------------------------------------------------------------------
    # smooth table
    smooth_table = [{  # smooth_iota_parent
        'name': 'smooth_iota_parent',
        'age_id': [0],
        'time_id': [0],
        'fun': fun_iota_parent
    }]
    if test_config['node_effects']:
        smooth_table += [{  # smooth_iota_child
            'name': 'smooth_iota_child',
            'age_id': [0],
            'time_id': [0],
            'fun': fun_iota_child
        }]
    if test_config['group_effects']:
        if test_config['use_group_mulcov']:
            smooth_table += [{  # smooth_iota_group
                'name': 'smooth_iota_group',
                'age_id': [0],
                'time_id': [0],
                'fun': fun_iota_group
            }]
        smooth_table += [{  # smooth_iota_subgroup
            'name': 'smooth_iota_subgroup',
            'age_id': [0],
            'time_id': [0],
            'fun': fun_iota_subgroup
        }]
    if test_config['sex_effect']:
        smooth_table.append({  # smooth_iota_sex
            'name': 'smooth_iota_sex',
            'age_id': [0],
            'time_id': [0],
            'fun': fun_iota_sex
        })
    # ----------------------------------------------------------------------
    # rate table
    rate_table = [{
        'name': 'iota',
        'parent_smooth': 'smooth_iota_parent',
        'child_smooth': 'smooth_iota_child' if test_config['node_effects'] else None,
    }]

    # ----------------------------------------------------------------------
    # option_table
    option_table = [
        # {'name': 'parent_node_name', 'value': 'north_america'},
        {'name': 'parent_node_name', 'value': 'p1'},
        {'name': 'print_level_fixed', 'value': 5},
        # {'name': 'print_level_fixed', 'value': 0},
        {'name': 'quasi_fixed', 'value': 'false'},
        # {'name': 'derivative_test_fixed', 'value': 'second-order'},
        # {'name': 'derivative_test_fixed', 'value': 'trace-adaptive'},
        {'name': 'tolerance_fixed', 'value': tol_fixed},
        {'name': 'bound_frac_fixed', 'value': '1e-10'},
        {'name': 'derivative_test_random', 'value': 'second-order'},
        {'name': 'tolerance_random', 'value': tol_random},
        {'name': 'zero_sum_mulcov_group',
         'value': 'g1' if test_config['group_effects'] and test_config['zero_sum_mulcov'] else None},
        {'name': 'zero_sum_child_rate',
         'value': 'iota' if test_config['node_effects'] else None},
        {'name': 'rate_case', 'value': 'iota_pos_rho_zero'},
        {'name': 'max_num_iter_fixed', 'value': '1000'},
        {'name': 'max_num_iter_random', 'value': '100'},
    ]
    # ----------------------------------------------------------------------

    # TODO: Change to using DismodIO instead of dismod_at.create_database
    try:
        from .create_database import create_database
    except ImportError:
        from create_database import create_database

    # create database
    #dismod_at.create_database(
    create_database(file_name, age_list, time_list, integrand_table,
                    node_table, subgroup_table, weight_table, covariate_table,
                    avgint_table, data_table, prior_table, smooth_table,
                    nslist_table, rate_table, mulcov_table, option_table)
    # ----------------------------------------------------------------------
    from cascade_at.dismod.api.dismod_io import DismodIO
    db = DismodIO(file_name)
    return db
Example #11
def main():
    create_database()
    try:
        enter_command()
    except Exception as e:
        print(e)
Example #12
models.database.Base.metadata.create_all(bind=engine)

app = FastAPI(title="Virtual Microscope API",
              description="API for thesis by student Andrey Otroshchenko",
              docs_url="/docs",
              redoc_url=None)

app.include_router(department.router,
                   prefix="/departments",
                   tags=["departments"])
app.include_router(object.router, prefix="/objects", tags=["objects"])


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_: Request,
                                       exc: RequestValidationError):
    print(_, exc)
    return JSONResponse(
        status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        content=jsonable_encoder({
            "detail": exc.errors(),
            "body": exc
        }),
    )


if __name__ == '__main__':
    db_creator.create_database(False)

    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="debug")
Example #13
def build_conversations(max_in_memory_value=10000, database_filename="-", db_name="tweet_database", drop_if_nonempty=True):
    '''
    Function to organize Tweets into conversations 
    (conversations = list of Tweets linked by inReplyTo fields)

    The output from this script is a set of JSON payloads (1 per line) that contain:
    {
        "tweets": [  # time-sorted list of Tweets
            { < Tweet payload > }, # if the first Tweet was missing, it has the format: {"missing_tweet_id": _, "screen_name": _, "user_id": _}
            { < Tweet payload > }  
          ],  
        "depths": [0,1...] #List of depths, same order as the tweets list  
    }

    Tweets in a conversation are time-sorted.

    The output is intended to provide a way to group Tweets so that the user can do 
    a row-level conversation analysis without having to hold more than 1 conversation's Tweets in memory.
    '''
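
    # For illustration (hypothetical IDs), a two-Tweet conversation payload
    # looks like: {"tweets": [{"id": "1", ...}, {"id": "2", ...}], "depths": [0, 1]}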

    # get the logger
    logging.getLogger("root")
    ##################################################################################### Database creation step

    # store all of the Tweets in a database with the following keys:
    # _id, in_reply_to_id, in_reply_to_user, in_reply_to_user_id
    client, db_name, tweet_collection = create_database(database_filename, db_name, drop_if_nonempty)

    ##################################################################################### Graph creation step

    # get links from parent to child nodes
    # the .aggregate function is provided by pymongo, as are the syntax/functions of this group step
    parent_to_children = {
        x["_id"]: {"children": x["children"], 
                   "in_reply_to_user": x["in_reply_to_user"], 
                   "in_reply_to_user_id": x["in_reply_to_user_id"] } 
          for x in tweet_collection.aggregate([
                # define pymongo group step for aggregating the database
                {"$group": { "_id": "$in_reply_to_id", 
                             "children": {"$push" : "$tweet_id"},
                             "in_reply_to_user": {"$first" : "$in_reply_to_user"},
                             "in_reply_to_user_id": {"$first" : "$in_reply_to_user_id"}
                           }}])
        }

    logging.debug('There were {} individual Tweets in the input.'.format(tweet_collection.count()))

    # make sure we have a "NOT_A_REPLY" key
    if "NOT_A_REPLY" not in parent_to_children:
        parent_to_children["NOT_A_REPLY"] = {"children": [], "in_reply_to_user": "******"}

    # get the root nodes so that we can build the graphs
    root_nodes = []
    all_children = []
    for key in parent_to_children:
        all_children.extend(parent_to_children[key]["children"])

    tweets_involved = len(set(all_children) | set(parent_to_children.keys()) - set(["NOT_A_REPLY"]))
    logging.debug('There are {} Tweets involved in the conversation'.format(tweets_involved) + 
       ' (some Tweets appear in an "inReplyTo" field, so we know they existed, ' + 
       'but they were not in the dataset)')

    # keys that are not children + tweets that are not replies
    root_nodes = (set(parent_to_children.keys()) - set(["NOT_A_REPLY"]) - 
        set(all_children)) | (set(parent_to_children["NOT_A_REPLY"]["children"])) 
    logging.debug('There are {} individual conversation threads.'.format(len(root_nodes)))
    del(all_children)

    # all of the conversation graphs
    multi_node_graphs = []
    # group the tweets together in conversations
    for root in root_nodes:
        children = find_children(root, None, 0, parent_to_children)
        multi_node_graphs.append(sorted(children, key=lambda k: k["depth"]))

    # in case of missing tweets, we want some info about the originating user
    tweet_to_screenname = {k: {"user_id": v["in_reply_to_user_id"], 
                               "screen_name": v["in_reply_to_user"]} for k,v in parent_to_children.items()}

    del(parent_to_children)
    del(root_nodes)

    logging.debug('Finished building the tree graph structure.')

    ##################################################################################### Graph hydration step
    # add the actual payloads of the Tweets and information about the graph structure to 
    # conversation objects

    # break up the graphs into pieces so that we can query for each piece
    shards = {0: {"tweets":[], "start": 0, "end": 1}}
    shard_number = 0
    for i,graph in enumerate(multi_node_graphs):
        if len(shards[shard_number]["tweets"]) + len(graph) > max_in_memory_value:
            shard_number += 1
            shards[shard_number] = {"tweets":[], "start": i, "end": i + 1}
        shards[shard_number]["tweets"].extend([x["tweet_id"] for x in graph])
        shards[shard_number]["end"] = i + 1

    shard_number += 1

    logging.debug('Broke the data into shards. There are {} shards '.format(shard_number) + 
        '(this is the number of calls that will be made to the database)')

    ##################################################################################### 

    logging.debug('Beginning to hydrate conversations.')
    #hydrated_conversations = [] #debugging
    for item,shard in list(shards.items()):
        # load up a shard of conversations
        id_to_tweet = {x["tweet_id"]: ujson.loads(x["tweet_payload"])
                         for x in tweet_collection.find( { "tweet_id": {"$in": shard["tweets"]} } )}
        # grab the conversations that we care about
        for conversation in multi_node_graphs[shard["start"]:shard["end"]]:
            # the "hydration" step provides a list of Tweets and some data about them
            # now "hydrate" each conversation (give it the actual tweet)
            hydrated_conversation = []
            for tweet in conversation:
                try:
                    # if it is a Tweet in our dataset
                    tweet_dict = id_to_tweet[tweet["tweet_id"]]
                    hydrated_conversation.append(
                        { 
                          "depth": tweet["depth"], 
                          "tweet": tweet_dict
                        }
                        )
                except KeyError:
                    # if it's not a Tweet in our dataset
                    hydrated_conversation.append(
                        {
                          "depth": tweet["depth"],
                          "tweet": {"missing_tweet_id": tweet["tweet_id"], 
                                    "screen_name": tweet_to_screenname[tweet["tweet_id"]]["screen_name"],
                                    "user_id": tweet_to_screenname[tweet["tweet_id"]]["user_id"]}
                        }
                        )
            # time-sort the conversation. 
            hydrated_conversation_sorted = sorted(hydrated_conversation, 
                key = lambda x: snowflake2utc(fg.tweet_id(x["tweet"])))
            conversation_payload = {"depths": [x["depth"] for x in hydrated_conversation_sorted], 
                                    "tweets": [x["tweet"] for x in hydrated_conversation_sorted]}
            # print the conversation payload
            #print(ujson.dumps(conversation_payload))
            yield(conversation_payload)
            #hydrated_conversations.append(hydrated_conversation) #debugging
        logging.debug('{} shards have been processed. There are {} shards remaining.'.format(item + 1, shard_number - item - 1))

    ##################################################################################### Cleanup

    # Close the database
    tweet_collection.drop()
    client.drop_database(db_name)
    client.close()

    logging.debug('Cleaned up the database (deleted the database & collection, closed the client)')
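

if __name__ == '__main__':
    # A minimal usage sketch (an assumption, not part of the original source):
    # stream every conversation to stdout as one JSON payload per line.
    # "tweets.json" is a hypothetical input file of Tweet payloads.
    for conversation_payload in build_conversations(database_filename="tweets.json"):
        print(ujson.dumps(conversation_payload))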
Example #14
 def setUp(self):
     create_database("tests.db")
     self.test_interface = command_handler.CommandHandler("tests.db")
Example #15
        nodes_writer.writeheader()
        node_tags_writer.writeheader()
        ways_writer.writeheader()
        way_nodes_writer.writeheader()
        way_tags_writer.writeheader()

        validator = cerberus.Validator()

        for element in get_element(file_in, tags=('node', 'way')):
            el = shape_element(element)
            if el:
                if validate is True:
                    validate_element(el, validator)

                if element.tag == 'node':
                    nodes_writer.writerow(el['node'])
                    node_tags_writer.writerows(el['node_tags'])
                elif element.tag == 'way':
                    ways_writer.writerow(el['way'])
                    way_nodes_writer.writerows(el['way_nodes'])
                    way_tags_writer.writerows(el['way_tags'])


if __name__ == '__main__':
    # Note: Validation is ~ 10X slower. For the project consider using a small
    # sample of the map when validating.
    OSM_PATH = 'Toulouse.osm'
    process_map(OSM_PATH, validate=False)
    create_database.create_database(OSM_PATH.replace('.osm', '_osm.db'))
Example #16
 if answerAddSmbEntry == "y" or answerAddSmbEntry == "Y":
   add_smb_entry.add_smb_entry(projectName)
 
 answerAddVhost = raw_input("add virtual host "+projectName+"? [y/n]")
 if answerAddVhost == "y" or answerAddVhost == "Y":
   add_vhost.add_vhost(projectName)
 
 answerCheckoutFromSVN = raw_input("checkout from svn? [y/n]")
 if answerCheckoutFromSVN == "y" or answerCheckoutFromSVN == "Y":
   svnRepositoryUrl = raw_input("please enter the subversion url you want to checkout from: ")
   checkout_from_svn.checkout_from_svn(projectName, svnRepositoryUrl)
 
 answerCreateDatabase = raw_input("create database and database user? [y/n]")
 if answerCreateDatabase == "y" or answerCreateDatabase == "Y":
   mysqlDatabaseUserName = raw_input("please enter the mysql user name: ")
   mysqlDatabaseUserPassword = raw_input("please enter the mysql user name password: ")
   # the create_database call was redacted in the source; presumably something like:
   # create_database.create_database(projectName, mysqlDatabaseUserName, mysqlDatabaseUserPassword)

 answerAddHostsEntry = raw_input("add hosts entry? [y/n]")
 if answerAddHostsEntry == "y" or answerAddHostsEntry == "Y":
   add_hosts_entry.add_hosts_entry(projectName)
 
 answerDatabaseDump = raw_input("do you have dump of database that you'd like to import? [y/n]")
 if answerDatabaseDump == "y" or answerDatabaseDump == "Y":
   pathToDumpFile = raw_input("please enter path to the dump file: ")
   read_dump.read_dump(pathToDumpFile)
 
 print "" 
 print "all good"
 print "have fun!"
 sys.exit(0)
Example #17
from create_database import create_database
from create_database import open_database
from fill_base import fill_base_test
#from test_database import test_base
from fill_base import fill_base_real
import mpi4py
from mpi4py import MPI
if __name__ == "__main__":
    db_name = "chemdatabase.db"
    create_database(db_name)
    conn, cursor = open_database(db_name)

    #comm = MPI.COMM_WORLD

    #print("Hello! I'm rank %d from %d running in total..." % (comm.rank, comm.size))
    #comm.Barrier()  # wait for everybody to synchronize _here_
    #fill_base_test(cursor)
    #test_base(cursor)
    fill_base_real(cursor,
                   conn,
                   name_of_file="toxicity_85832.csv",
                   name_of_task="TestTox100",
                   name_of_descriptor="mordred",
                   version_of_descriptor="1.12")
    fill_base_real(cursor,
                   conn,
                   name_of_file="melting_prepared_282517.csv",
                   name_of_task="TestTask1000",
                   name_of_descriptor="mordred",
                   version_of_descriptor="1.12")