Example #1
    def insert_details(self, data, project_path, isghb, cycle_id, status, obs_no):
        print("------------------==insert_details==----------------------")
        print(data)
        print(project_path)
        dbutils = DBUtils()
        for each_rec in data:
            print("==each_rec==", each_rec)
            lta_file = os.path.basename(each_rec)
            # NB: basename() returns a string, so this length-1 check is
            # effectively a no-op (likely leftover from a list-based version).
            if len(lta_file) == 1:
                lta_file = lta_file[0]
            try:
                current_date_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
                lta_details = gS.get_naps_scangroup_details(lta_file)
                lta_details["ltacomb_size"] = int(self.calculalate_file_sizse_in_MB(each_rec))
                lta_details["status"] = "unprocessed"
                lta_details["base_path"] = project_path
                lta_details["start_time"] = current_date_timestamp
                lta_details["proposal_dir"] = project_path.split('/')[-1]
                lta_details["pipeline_id"] = 1
                lta_details["comments"] = status
                lta_details["counter"] = 0
                lta_details["ltacomb_file"] = lta_file
                lta_details["isghb"] = isghb
                lta_details["cycle_id"] = cycle_id
                if obs_no:
                    lta_details["observation_no"] = int(obs_no)


                projectobsno_data = {}
                for key in tableSchema.projectobsnoData:
                    if key in lta_details:
                        projectobsno_data[key] = lta_details[key]

                ltadetails_data = {}
                for key in tableSchema.ltadetailsData:
                    if key in lta_details:
                        ltadetails_data[key] = lta_details[key]
                #print ltadetails_data
                # print("ltadetails_data")
                # print(ltadetails_data)

                columnKeys = {"project_id"}
                whereKeys = {"proposal_dir": lta_details["proposal_dir"], "cycle_id": cycle_id}

                project_id = dbutils.select_test_table("projectobsno", columnKeys, whereKeys, 0)

                if project_id:
                    project_id = project_id[0]
                else:
                    project_id = dbutils.insert_into_table("projectobsno", projectobsno_data, tableSchema.projectobsnoId)

                ltadetails_data["project_id"] = project_id

                lta_id = dbutils.insert_into_table("ltadetails", ltadetails_data, tableSchema.ltadetailsId)
                print(lta_id)
                print("projectobsno")
                print(projectobsno_data)
            except Exception as e:
                print(e)
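The loop above looks up project_id by (proposal_dir, cycle_id) and inserts a new projectobsno row only when the lookup comes back empty. Under concurrent writers that select-then-insert pattern can race; below is a minimal sketch of the same get-or-create step done atomically with plain psycopg2 and PostgreSQL's ON CONFLICT. The UNIQUE constraint on (proposal_dir, cycle_id) and the column subset are assumptions, not confirmed by the snippet.

import psycopg2

def get_or_create_project(conn, proposal_dir, cycle_id, base_path, status):
    # Hedged sketch: assumes PostgreSQL and a UNIQUE constraint on
    # (proposal_dir, cycle_id); neither is shown in the example above.
    with conn.cursor() as cur:
        cur.execute(
            """
            INSERT INTO projectobsno (proposal_dir, cycle_id, base_path, status)
            VALUES (%s, %s, %s, %s)
            ON CONFLICT (proposal_dir, cycle_id) DO UPDATE
                SET proposal_dir = EXCLUDED.proposal_dir  -- no-op so RETURNING yields a row
            RETURNING project_id
            """,
            (proposal_dir, cycle_id, base_path, status),
        )
        return cur.fetchone()[0]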
Example #2
def __prerequisites():
    """
    Fetch the basic proposal data depending on the CYCLE_ID
    This is basically stage0
    :return:
    """
    config = Pipeline().pipeline_configuration()
    CYCLE_ID = str(config["cycle_id"])
    CYCLE_PATH = config["cycle_path"]

    sql_query = "select distinct o.observation_no, p.proposal_id, g.file_path, p.backend_type from " \
                "gmrt.proposal p inner join das.observation o on p.proposal_id = o.proj_code " \
                "inner join das.scangroup g on g.observation_no = o.observation_no " \
                "inner join das.scans s on s.scangroup_id = g.scangroup_id " \
                "inner join gmrt.sourceobservationtype so on p.proposal_id = so.proposal_id " \
                "where p.cycle_id ='" + CYCLE_ID + "' " \
                                                   "and so.obs_type not like 'pulsar%' " \
                                                   "and so.obs_type not like 'phased array'" \
                                                   "and s.sky_freq1=s.sky_freq2 " \
                                                   "and s.sky_freq1 < 900000000 " \
                                                   "and s.chan_width >= 62500 " \
                                                   "and o.proj_code not like '16_279' " \
                                                   "and o.proj_code not like '17_072' " \
                                                   "and o.proj_code not like '18_031' " \
                                                   "and o.proj_code not like '19_043' " \
                                                   "and o.proj_code not like '20_083' " \
                                                   "and o.proj_code not like '21_057';"

    dbutils = DBUtils()

    data = dbutils.select_query(sql_query)
    gadpudata = {}
    for each_row in data:
        gadpudata[each_row[0]] = {
            "proposal_id": each_row[1],
            "file_path": each_row[2],
            "backend_type": each_row[3],
            "cycle_id": CYCLE_ID
        }
    return (gadpudata, CYCLE_PATH)
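The query above splices CYCLE_ID into the SQL text. The surrounding examples hint at a PostgreSQL backend (the das/gmrt schemas, the PSYCOPG2_DB_PASS variable in Example #4), though DBUtils internals are not shown, so a bound parameter is the safer shape. A hedged sketch:

import psycopg2

# Hedged sketch: the same cycle filter with a bound parameter instead of
# string concatenation. psycopg2/PostgreSQL and the DSN are assumptions.
sql_query = ("select distinct o.observation_no, p.proposal_id, g.file_path, "
             "p.backend_type from gmrt.proposal p "
             "inner join das.observation o on p.proposal_id = o.proj_code "
             "where p.cycle_id = %s")
with psycopg2.connect(dbname="gadpu") as conn:  # hypothetical database name
    with conn.cursor() as cur:
        cur.execute(sql_query, (CYCLE_ID,))
        rows = cur.fetchall()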
Example #3
    def __prerequisites_ghb(self):
        CYCLE_ID = "15"
        CYCLE_PATH = '/GARUDATA/IMAGING15/CYCLE15/'

        sql_query = "select distinct o.observation_no, p.proposal_id, g.file_path, p.backend_type from " \
                    "gmrt.proposal p inner join das.observation o on p.proposal_id = o.proj_code " \
                    "inner join das.scangroup g on g.observation_no = o.observation_no " \
                    "inner join das.scans s on s.scangroup_id = g.scangroup_id " \
                    "inner join gmrt.sourceobservationtype so on p.proposal_id = so.proposal_id " \
                    "where p.cycle_id ='" + CYCLE_ID + "' " \
                                                       "and so.obs_type not like 'pulsar%' " \
                                                       "and so.obs_type not like 'phased array'" \
                                                       "and s.sky_freq1=s.sky_freq2 " \
                                                       "and s.sky_freq1 < 900000000 " \
                                                       "and s.chan_width >= 62500 " \
                                                       "and o.proj_code not like '16_279' " \
                                                       "and o.proj_code not like '17_072' " \
                                                       "and o.proj_code not like '18_031' " \
                                                       "and o.proj_code not like '19_043' " \
                                                       "and o.proj_code not like '20_083' " \
                                                       "and o.proj_code not like '23_066' " \
                                                       "and o.proj_code not like '26_063' " \
                                                       "and o.proj_code not like 'ddtB014' " \
                                                       "and o.proj_code not like 'ddtB015' " \
                                                       "and o.proj_code not like 'ddtB028' " \
                                                       "and o.proj_code not like '21_057';"
        dbutils = DBUtils()
        data = dbutils.select_query(sql_query)
        gadpudata = {}
        for each_row in data:
            gadpudata[each_row[0]] = {
                "proposal_id": each_row[1],
                "file_path": each_row[2],
                "backend_type": each_row[3],
                "cycle_id": CYCLE_ID
            }
        return (gadpudata, CYCLE_PATH)
Example #4
 def setUp(self):
     self.pgdb = DBUtils(password=os.environ['PSYCOPG2_DB_PASS'])
Example #5
class Bank:
    __db = DBUtils(database="bank")
    __bankName = "中国工商银行北京市沙河支行"  # ICBC, Beijing Shahe branch

    def getBankName(self):
        return self.__bankName

    def bankAddUser(self, user):
        sql1 = "select count(1) from t_user"  # 查询是否已满的数据库
        sql = "select * from t_user where username = %s"  # 检查是否存在该用户
        sql2 = "insert into t_user(" \
               "account,username,password,country," \
               "province,street,door,money,reg_date,bankname)" \
               " values(%s,%s,%s,%s,%s,%s,%s,%s,now(),%s)"
        param = [user.getUsername()]
        if self.__db.select(sql1, None)[0][0] >= 100:
            return 3
        elif len(self.__db.select(sql, param)) != 0:
            return 2
        else:
            param1 = [
                user.getAccount(),
                user.getUsername(),
                user.getPassword(),
                user.getAddress().getCountry(),
                user.getAddress().getProvince(),
                user.getAddress().getStreet(),
                user.getAddress().getDoor(),
                user.getMoney(),
                user.getBankName()
            ]
            self.__db.update(sql2, param1)

            # user_info is a module-level format template (not shown in this snippet)
            print(
                user_info.format(account=user.getAccount(),
                                 username=user.getUsername(),
                                 password=user.getPassword(),
                                 country=user.getAddress().getCountry(),
                                 province=user.getAddress().getProvince(),
                                 street=user.getAddress().getStreet(),
                                 door=user.getAddress().getDoor(),
                                 money=user.getMoney(),
                                 time=user.getRegisterDate(),
                                 bank_name=user.getBankName()))
            self.__db.releaseConnection()
            return 1

    def selectUser(self, account, password):
        sql = "select * from t_user where account = %s  and password = %s"
        param = [account, password]
        result = self.__db.select(sql, param, mode="one")

        return result

    def bankSaveMoney(self, account, money):
        sql = "select * from t_user where username = %s"
        sql2 = "select money from t_user where account = %s"
        sql3 = "update t_user set money = %s where account = %s"
        param = [account]
        a = self.__db.select(sql, param)
        if len(a) == 0:  # account does not exist
            return 2
        else:
            a = self.__db.select(sql2, param, mode="one")
            a = a[0] + money
            param1 = [a, account]
            self.__db.update(sql3, param1)
            return True

    def bankGetMoney(self, account, password, money):
        sql = "select * from t_user where account = %s"
        sql1 = "select * from t_user where account = %s  and password = %s"
        sql2 = "select money from t_user where account = %s"
        sql3 = "update t_user set money = %s where account = %s"
        param = [account]
        param1 = [account, password]
        a = self.__db.select(sql2, param, mode="one")
        if len(self.__db.select(sql, param)) == 0:
            return 1
        elif len(self.__db.select(sql1, param1)) != 0:
            if a[0] < money:
                return 3
            else:
                b = a[0] - money
                param2 = [b, account]
                self.__db.update(sql3, param2)
                return 0
        else:
            return 2

    def bankMoveMoney(self, account, password, money, account1):
        sql = "select * from t_user where account = %s "
        sql1 = "select * from t_user where account = %s  and password = %s"
        sql2 = "select money from t_user where account = %s"
        sql3 = "update t_user set money = %s where account = %s"
        param = [account]
        param1 = [account1]
        param2 = [account, password]
        a = self.__db.select(sql2, param, mode="one")
        c = self.__db.select(sql2, param1, mode="one")
        # Both the source and the destination account must exist.
        if len(self.__db.select(sql, param)) != 0 and len(
                self.__db.select(sql, param1)) != 0:
            if len(self.__db.select(sql1, param2)) != 0:
                if a[0] < money:
                    return 3
                else:
                    b = a[0] - money
                    d = c[0] + money
                    param3 = [b, account]
                    param4 = [d, account1]
                    self.__db.update(sql3, param3)
                    self.__db.update(sql3, param4)
                    return 0
            else:
                return 2
        else:
            return 1

    def selectUser1(self, account, password):
        bank = Bank()
        sql = "select * from t_user where account=%s"
        sql1 = "select * from t_user where account=%s and password=%s"
        param = [account]
        param1 = [account, password]
        if len(self.__db.select(sql, param)) == 0:
            print("账号不存在!")
        elif len(self.__db.select(sql1, param1)) == 0:
            print("账号或密码错误!")
        else:
            database = bank.selectUser(account, password)
            print(
                user_info.format(account=database[1],
                                 username=database[2],
                                 password=database[3],
                                 country=database[4],
                                 province=database[5],
                                 street=database[6],
                                 door=database[7],
                                 money=database[8],
                                 time=database[9],
                                 bank_name=database[10]))
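The Bank methods signal outcomes with small integer codes: bankAddUser returns 1 on success, 2 when the username already exists and 3 when the table is full; bankGetMoney uses 0/1/2/3 for ok, unknown account, wrong password and insufficient funds. A minimal usage sketch, assuming the User and Address accessors from Example #11 (they are not defined in this snippet):

from Bank import Bank
from User import User

# Hedged usage sketch; setters are assumed from Example #11, and the
# remaining User/Address fields would need to be populated the same way.
bank = Bank()
user = User()
user.setUsername("alice")
user.setPassword("123456")

status = bank.bankAddUser(user)
if status == 1:
    print("Account created at", bank.getBankName())
elif status == 2:
    print("Username already exists")
else:  # status == 3: the 100-user table is full
    print("User table is full")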
Example #6
    def parse_args(db_config_file):
        # Query the database to gather some items for argument output.
        pgdb = DBUtils(config_file=db_config_file)
        valid_db_opts = pgdb.getIDs()

        # Parse the arguments
        parser = argparse.ArgumentParser(
            description="Launches SCOOP parallelized version "
            "of genetic algorithm evaluation.",
            formatter_class=argparse.RawTextHelpFormatter)

        parser.add_argument("trail",
                            type=int,
                            metavar='trail',
                            help="Trail to use.",
                            choices=valid_db_opts["trail"])
        parser.add_argument("population",
                            type=int,
                            metavar="mu",
                            help="Size of the population. Serves as mu "
                            " in varOr type runs.")
        parser.add_argument("lambda_",
                            metavar="lambda",
                            type=int,
                            help="Size of the offspring pool (lambda). "
                            "Required in varOr type runs.")
        parser.add_argument("moves",
                            type=int,
                            metavar="moves",
                            help="Maximum moves for agent.")
        parser.add_argument("network",
                            type=int,
                            metavar='network',
                            nargs='*',
                            help=textwrap.dedent("Network type to use."),
                            choices=valid_db_opts["network"])

        group = parser.add_argument_group('Application Options')
        group.add_argument("--disable-db",
                           action='store_true',
                           help="Disables logging of run to database.")
        group.add_argument(
            "--debug",
            action='store_true',
            help="Enables debug messages and flag for data in DB.")
        group.add_argument("-q",
                           "--quiet",
                           action='store_true',
                           help="Disables all output from application.")
        group.add_argument(
            "--script-mode",
            action='store_true',
            help="Disables progress bar and prints information to stdout.")
        group.add_argument("-r",
                           "--repeat",
                           type=int,
                           nargs="?",
                           default=1,
                           help="Number of times to repeat simulations.")
        group.add_argument("--no-early-quit",
                           action='store_true',
                           help='Disables automatic or early termination.')

        group = parser.add_argument_group('Genetic Algorithm Configuration')
        group.add_argument("-g",
                           "--generations",
                           type=int,
                           nargs="?",
                           default=GENS_DEF,
                           help="Number of generations to run for.")
        group.add_argument("--variation",
                           type=int,
                           default=VARIATION_DEF,
                           help="Variation type to use in DEAP.",
                           choices=valid_db_opts["variations"])
        group.add_argument("--mutate-type",
                           type=int,
                           nargs="?",
                           default=MUTATE_DEF,
                           help="Mutation type.",
                           choices=valid_db_opts["mutate"])
        group.add_argument("--prob-mutate",
                           type=float,
                           nargs="?",
                           default=P_MUTATE_DEF,
                           help="Probability of a mutation to occur.")
        group.add_argument("--prob-crossover",
                           type=float,
                           nargs="?",
                           default=P_CROSSOVER_DEF,
                           help="Probability of crossover to occur.")
        group.add_argument("--weight-min",
                           type=float,
                           nargs="?",
                           default=WEIGHT_MIN_DEF,
                           help="Minimum weight.")
        group.add_argument("--weight-max",
                           type=float,
                           nargs="?",
                           default=WEIGHT_MAX_DEF,
                           help="Maximum weight")
        group.add_argument(
            "--mean-check-length",
            type=int,
            default=DEF_MEAN_CHANGE,
            help="Only used with variation 3. Specifies the number of "
            "previous generations\nto see if there is no change in average food "
            "consumed. Stops algorithm\nif there is no change for this period "
            "of time. Defaults to {0}.".format(DEF_MEAN_CHANGE))

        group = parser.add_argument_group('Genetic Algorithm '
                                          'Selection Configuration')
        group.add_argument("-s",
                           "--selection",
                           type=int,
                           default=SELECTION_DEF,
                           help="Selection type to use.",
                           choices=valid_db_opts["selection"])
        group.add_argument(
            "--tournament-size",
            type=int,
            default=DEF_ERROR_VAL,
            help="If using tournament selection, the size of the tournament.")

        args = parser.parse_args()

        utils.__check_args(args)

        return args
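A minimal invocation sketch for the parser above; the config filename is hypothetical, and the enclosing utils class is inferred from the utils.__check_args call:

# Hedged sketch: driving parse_args from a __main__ block.
# "db_config.cfg" is a hypothetical filename.
if __name__ == "__main__":
    args = utils.parse_args("db_config.cfg")
    print(args.trail, args.population, args.generations)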
Example #7
    def plotly_single_run_set(run_id, run_info=None):
        # Establish a database connection
        pgdb = DBUtils()

        # Fetch this run's information, if not provided.
        if run_info is None:
            run_info = pgdb.fetchRunInfo(run_id)[run_id]

        # Determine the maximum amount of food and moves possible.
        trail_data = pgdb.getTrailData(run_info["trails_id"])[0]
        max_food = np.bincount(np.squeeze(np.asarray(trail_data.flatten())))[1]
        max_moves = np.array(run_info["moves_limit"])

        # Fetch the data on the run and determine number of generations.
        gens_data = pgdb.fetchRunGenerations([run_id])[run_id]
        num_gens = len(gens_data)

        x = np.linspace(0, num_gens - 1, num=num_gens)

        # Settings used for plotting.
        chart_set_config = {
            "food": {
                "db_key": "food",
                "stats": ["max", "min", "avg", "std"],
                "title": "Food vs. Generations for Run ID {0}",
                "type": Scatter,
                "plot-mode": "lines",
                "xaxis": "Generations",
                "yaxis": "Food Consumed",
                "max-line": max_food,
                "max-title": "Available"
            },
            "moves-taken": {
                "db_key": "moves",
                "stats": ["max", "min", "avg", "std"],
                "title": "Moves Taken vs. Generations for Run ID {0}",
                "type": Scatter,
                "plot-mode": "lines",
                "xaxis": "Generations",
                "yaxis": "Moves Taken",
                "max-line": max_moves,
                "max-title": "Limit"
            },
            "moves-dir": {
                "db_key": "moves",
                "stats": ["left", "right", "forward", "none"],
                "title": "Move Types vs. Generations for Run ID {0}",
                "type": Scatter,
                "plot-mode": "lines",
                "xaxis": "Generations",
                "yaxis": "Move Type",
                "max-line": None,
            }
        }

        plot_urls = {}

        # TODO: Could multithread here to speed things up.
        for chart_type, settings in chart_set_config.items():
            traces_list = []

            # Go through each of the stats and build the traces.
            for stat in settings["stats"]:
                data_set = np.zeros((num_gens))

                for curr_gen in range(0, num_gens):
                    data_set[curr_gen] = (
                        gens_data[curr_gen][settings["db_key"]][stat])

                this_trace = settings["type"](x=x,
                                              y=data_set,
                                              mode=settings["plot-mode"],
                                              name=stat.title())

                traces_list.append(this_trace)

            # If desired, add the maximum line.
            if settings["max-line"] is not None:

                y_val = np.empty(len(x))
                y_val.fill(settings["max-line"])

                traces_list.append(settings["type"](
                    x=x,
                    y=y_val,
                    mode="lines",
                    line={
                        "dash": "dash"
                    },
                    name=settings["max-title"].title()))

            layout = Layout(
                title=settings["title"].format(run_id),
                xaxis=XAxis(title=settings["xaxis"].format(run_id)),
                yaxis=YAxis(title=settings["yaxis"].format(run_id)),
            )

            fig = Figure(data=Data(traces_list), layout=layout)

            # Generate the URL.
            plot_urls[chart_type] = chart.__generate_plotly_url(
                fig,
                filename="apigen/{0}_{1}".format(chart_type, run_id),
                fileopt='overwrite',
            )

        return plot_urls
Example #8
    def process_targets(self):
        """
        SPAM's process_target
        :return:
        """
        print("Started Stage5: ")
        cycle_id = self.pipeline_configuration()["cycle_id"]

        fileutils = FileUtils()
        aips_id = int(random.random() * 100)
        spam.set_aips_userid(11)
        # Setting the Process Start Date Time
        start_time = str(datetime.datetime.now())
        # Taking system's in/out to backup variable
        original_stdout = sys.stdout
        original_stderr = sys.stderr
        thread_dir = os.getcwd()
        # Changing directory to fits/
        os.chdir("fits/")
        datfil_dir = thread_dir + "/datfil/"
        fits_dir = thread_dir + "/fits/"
        curr_dir = thread_dir + "/fits/"
        process_status = False
        db_model = DBUtils()
        # Get random imaging_id & project_id
        column_keys = [
            tableSchema.imaginginputId, tableSchema.projectobsnoId,
            "calibrated_fits_file"
        ]
        where_con = {"status": str(cycle_id)}
        to_be_processed = db_model.select_from_table("imaginginput",
                                                     column_keys, where_con,
                                                     None)
        imaginginput_details = random.choice(to_be_processed)
        print(imaginginput_details)
        imaging_id = imaginginput_details["imaging_id"]

        # Update status for imaginginput for selected imaging_id
        current_time_in_sec = time.time()
        current_date_timestamp = datetime.datetime.fromtimestamp(
            current_time_in_sec).strftime('%Y-%m-%d %H:%M:%S')
        update_data = {
            "set": {
                "status": "processing",
                "start_time": current_date_timestamp,
                "comments": "",
                "end_time": current_date_timestamp
            },
            "where": {
                "imaging_id": imaging_id,
            }
        }
        db_model.update_table(update_data, "imaginginput")

        project_id = imaginginput_details["project_id"]
        calibrated_fits_file = imaginginput_details["calibrated_fits_file"]

        # Using the above project_id, fetch base_path
        column_keys = ["base_path"]
        where_con = {"project_id": project_id}
        process_target_log = open('process_target.log', 'a+')
        process_target_log.write(
            '\n\n\n******PROCESS TARGET STARTED******\n\n\n')
        process_target_log.write("--->  Start Time " + start_time)
        # Logging all Standard In/Output
        sys.stdout = process_target_log
        sys.stderr = process_target_log
        base_path = db_model.select_from_table("projectobsno", column_keys,
                                               where_con, 0)
        base_path = base_path[0]
        uvfits_full_path = base_path + "/PRECALIB/" + calibrated_fits_file
        print "Copying " + uvfits_full_path + " to " + fits_dir
        copying_fits = os.system("cp " + uvfits_full_path + " " + fits_dir)
        uvfits_file = calibrated_fits_file
        # Starting spam.process_target(SPLIT_FITS_FILE)
        try:
            spam.process_target(uvfits_file,
                                allow_selfcal_skip=True,
                                add_freq_to_name=True)
            # If this process_target is success call
            # GADPU API setSuccessStatus for the current fits_id
            current_time_in_sec = time.time()
            current_date_timestamp = datetime.datetime.fromtimestamp(
                current_time_in_sec).strftime('%Y-%m-%d %H:%M:%S')
            success_update_data = {
                "set": {
                    "status": "checking",
                    "end_time": current_date_timestamp,
                    "comments": "processing done, checking"
                },
                "where": {
                    "imaging_id": imaging_id
                }
            }
            db_model.update_table(success_update_data, "imaginginput")
        except Exception as e:
            process_target_log.write("Error: " + str(e))
            # If this process_target is a failure call
            # GADPU API setFailedStatus for the current fits_id
            current_date_timestamp = datetime.datetime.fromtimestamp(
                time.time()).strftime('%Y-%m-%d %H:%M:%S')
            success_update_data = {
                "set": {
                    "status": "failed",
                    "end_time": current_date_timestamp,
                },
                "where": {
                    "imaging_id": imaging_id
                }
            }
            db_model.update_table(success_update_data, "imaginginput")
            print("Error: spam.process_tagret Failed " + uvfits_file)
Example #9
    def pre_calibration_targets(self):
        print("Started Stage3: ")
        spam.set_aips_userid(33)
        dbutils = DBUtils()
        fileutils = FileUtils()

        cycle_id = self.pipeline_configuration()["cycle_id"]

        # while True:
        #     columnKeys = {"calibration_id"}
        #     whereData = {"comments": "c15", "status": "copying"}
        #     uncalibrated_uvfits = dbutils.select_from_table("calibrationinput", columnKeys, whereData, 0)
        #     if not uncalibrated_uvfits:
        #         break
        #     print("Waiting for bandwidth ... ")
        #     time.sleep(50)

        columnKeys = {"calibration_id", "project_id", "uvfits_file"}
        whereData = {"status": str(cycle_id)}
        uncalibrated_uvfits = dbutils.select_from_table(
            "calibrationinput", columnKeys, whereData, 0)

        if not uncalibrated_uvfits:
            print(
                "All of the data has been processed ... please check the DB for pre_calib"
            )
            spam.exit()

        calibration_id = uncalibrated_uvfits[0]
        project_id = uncalibrated_uvfits[1]
        uvfits_file = uncalibrated_uvfits[2]

        columnKeys = {"base_path", "observation_no"}
        whereData = {"project_id": project_id, "cycle_id": int(cycle_id)}
        project_details = dbutils.select_from_table("projectobsno", columnKeys,
                                                    whereData, 0)

        # NB: columnKeys is a set, so indexing the result positionally assumes
        # DBUtils returns columns in a fixed order.
        base_path = project_details[1]
        observation_no = project_details[0]

        current_date_timestamp = datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d %H:%M:%S')

        projectobsno_update_data = {
            "set": {
                "status":
                "processing",
                "comments":
                "running precalibrate_target, calibration_id = " +
                str(calibration_id),
            },
            "where": {
                "project_id": project_id
            }
        }

        calibration_update_data = {
            "set": {
                "status": "copying",
                "start_time": current_date_timestamp
            },
            "where": {
                "calibration_id": calibration_id
            }
        }

        dbutils.update_table(projectobsno_update_data, "projectobsno")
        dbutils.update_table(calibration_update_data, "calibrationinput")

        UVFITS_FILE_NAME = uvfits_file
        UVFITS_BASE_DIR = base_path
        is_fits_dir = os.getcwd().split('/')
        print(is_fits_dir)
        SPAM_WORKING_DIR = os.getcwd()
        print(SPAM_WORKING_DIR)
        # for num in range(1, 3):
        #     SPAM_THREAD_DIR += "/" + is_fits_dir[num]
        # if 'fits' not in is_fits_dir:
        #     SPAM_THREAD_DIR = os.getcwd()
        SPAM_WORKING_DIR = os.getcwd() + "/fits/"
        print(SPAM_WORKING_DIR, UVFITS_BASE_DIR, UVFITS_FILE_NAME)
        UVFITS_FILE_PATH = UVFITS_BASE_DIR + "/" + UVFITS_FILE_NAME
        print(UVFITS_FILE_PATH)
        print(SPAM_WORKING_DIR)
        fileutils.copy_files(UVFITS_FILE_PATH, SPAM_WORKING_DIR)
        print("Copying done ==> Moving to pre_cal_target")
        current_date_timestamp = datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d %H:%M:%S')
        calibration_update_data = {
            "set": {
                "status": "processing",
                "start_time": current_date_timestamp
            },
            "where": {
                "calibration_id": calibration_id
            }
        }
        dbutils.update_table(calibration_update_data, "calibrationinput")

        fileutils.run_spam_precalibration_stage(UVFITS_BASE_DIR,
                                                SPAM_WORKING_DIR,
                                                UVFITS_FILE_NAME,
                                                observation_no)
        current_time_in_sec = time.time()
        current_date_timestamp = datetime.datetime.fromtimestamp(
            current_time_in_sec).strftime('%Y-%m-%d %H:%M:%S')

        check_status_file = glob.glob(base_path + "/PRECALIB/failed_log.txt")
        comments = "failed"
        if check_status_file:
            status = "failed"
            comments = str(open(check_status_file[0], 'r').read())
        else:
            status = "success"
            comments = "precalibrate_target done, calibration_id = " + str(
                calibration_id)

        projectobsno_update_data = {
            "set": {
                "status": status,
                "comments": comments
            },
            "where": {
                "project_id": project_id
            }
        }

        calibration_update_data = {
            "set": {
                "status": status,
                "end_time": current_date_timestamp,
                "comments": comments
            },
            "where": {
                "calibration_id": calibration_id
            }
        }

        dbutils.update_table(projectobsno_update_data, "projectobsno")
        dbutils.update_table(calibration_update_data, "calibrationinput")
Example #10
 def setUp(self):
     # This class requires accessing the database.
     self.pgdb = DBUtils(password=os.environ['PSYCOPG2_DB_PASS'])
     self.trail_i = trail()
     self.trail_i.readTrail(TEST_TRAIL_DB_ID)
Example #11
from User import User
from Address import Address
from Bank import Bank
from welcome import Welcome
from Utils import Utils
from DBUtils import DBUtils

welcome = Welcome()
utils = Utils()
address = Address()
user = User()
bank = Bank()
dBUtils = DBUtils()


def addUser():
    account = utils.random()
    user.setUsername(input("Please enter a username: "))
    user.setPassword(input("Please enter your password (6 digits): "))
    print("Next, enter your address information:")
    address.setCountry(input("Please enter the country: "))
    address.setProvince(input("Please enter the province: "))
    address.setStreet(input("Please enter the street: "))
    address.setDoor(input("Please enter the door number: "))
    # The balance cannot be entered at sign-up; the user has to deposit money afterwards
    status = bank.bank_addUser(account, user.getUsername(), user.getPassword(),
                               address.getCountry(), address.getProvince(),
                               address.getStreet(), address.getDoor())
    if status == 1:
        print("恭喜开户成功!")
        info = '''
Example #12
def __main__():
    dbutils = DBUtils()
    fileutils = FileUtils()

    columnKeys = {"calibration_id", "project_id", "uvfits_file"}
    whereData = {"comments": "c17", "status": "success"}
    uncalibrated_uvfits = dbutils.select_from_table("calibrationinput",
                                                    columnKeys, whereData, 0)

    calibration_id = uncalibrated_uvfits[0]
    project_id = uncalibrated_uvfits[1]
    uvfits_file = uncalibrated_uvfits[2]

    columnKeys = {"file_path", "observation_no"}
    whereData = {"project_id": project_id, "cycle_id": 17}
    project_details = dbutils.select_from_table("projectobsno", columnKeys,
                                                whereData, 0)

    # NB: columnKeys is a set, so positional indexing assumes a fixed column order.
    base_path = project_details[1]
    observation_no = project_details[0]

    current_time_in_sec = time.time()
    current_date_timestamp = datetime.datetime.fromtimestamp(
        current_time_in_sec).strftime('%Y-%m-%d %H:%M:%S')

    projectobsno_update_data = {
        "set": {
            "status":
            "processing",
            "comments":
            "running precalibrate_target, calibration_id = " +
            str(calibration_id),
        },
        "where": {
            "project_id": project_id,
            "status": "unprocessed"
        }
    }

    calibration_update_data = {
        "set": {
            "status": "processing",
            "start_time": current_date_timestamp
        },
        "where": {
            "calibration_id": calibration_id,
            "status": "success"
        }
    }

    dbutils.update_table(projectobsno_update_data, "projectobsno")
    dbutils.update_table(calibration_update_data, "calibrationinput")

    EACH_UVFITS_FILE = base_path + '/' + uvfits_file

    UVFITS_BASE_DIR = base_path + "/"
    if not check_pipeline_flag(UVFITS_BASE_DIR):
        set_flag(UVFITS_BASE_DIR, PRECAL_PROCESSING)
        is_fits_dir = os.getcwd().split('/')
        SPAM_WORKING_DIR = os.getcwd()
        SPAM_THREAD_DIR = ""
        for num in range(1, 4):
            SPAM_THREAD_DIR += "/" + is_fits_dir[num]
        if 'fits' not in is_fits_dir:
            SPAM_THREAD_DIR = os.getcwd()
            SPAM_WORKING_DIR = os.getcwd() + "/fits/"
        copy_files(EACH_UVFITS_FILE, SPAM_WORKING_DIR)
        print "Copying done ==> Moving to pre_cal_target"

        run_spam_precalibration_stage(UVFITS_BASE_DIR, SPAM_WORKING_DIR,
                                      uvfits_file)

        check_status_file = glob.glob(base_path + "/PRECALIB/failed_log.txt")

        if check_status_file:
            status = "failed"
        else:
            status = "success"

        projectobsno_update_data = {
            "set": {
                "status":
                status,
                "comments":
                "precalibrate_target " + status + ", calibration_id = " +
                str(calibration_id),
            },
            "where": {
                "project_id": project_id
            }
        }

        calibration_update_data = {
            "set": {
                "status": status,
                "end_time": current_date_timestamp
            },
            "where": {
                "calibration_id": calibration_id
            }
        }

        dbutils.update_table(projectobsno_update_data, "projectobsno")
        dbutils.update_table(calibration_update_data, "calibrationinput")

        if status == 'success':
            calibrated_uvfits_list = glob.glob(base_path +
                                               '/PRECALIB/*.UVFITS')
            if calibrated_uvfits_list:
                for each_uvfits in calibrated_uvfits_list:
                    imaging_data = {
                        "project_id":
                        project_id,
                        "calibration_id":
                        calibration_id,
                        "calibrated_fits_file":
                        os.path.basename(each_uvfits),
                        "status":
                        "unprocessed",
                        "comments":
                        "c17",
                        "file_size":
                        fileutils.calculalate_file_sizse_in_MB(each_uvfits)
                    }
                    dbutils.insert_into_table("imaginginput", imaging_data,
                                              "imaging_id")

        delete_dir(SPAM_THREAD_DIR)
        spam.exit()
Example #13
# -*- coding: UTF-8 -*-

from DBUtils import DBUtils

print("mssql connect demo\n")

if __name__ == '__main__':
    db = DBUtils(host='118.31.239.85',username='******',password='******',database='GAVEL_DMG')
    db.connect()

    results = db.query('SELECT * FROM PD')
    print("项目列表: ")
    for i, r in enumerate(results):
        print('\t* ' + str(i + 1) + ' >>>  ', end="")
        print(r)
    index = input("请选择:")
    record = results[int(index)-1]
    print("您选择的项目是: ", record)

    sql = "select * from DBTABLE where DBTABLE_CPID = '" + record[1] + "'"
    records = db.query(sql)
    for i, r in enumerate(records):
        print('\t* ' + str(i + 1) + ' >>>  ', end="")
        print(r)

    # ALTER TABLE GAVEL_DMG.dbo.DBTABLE ALTER COLUMN DBTABLE_ID NVARCHAR(40) NOT NULL


    db.destory()
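The DBTABLE lookup above splices record[1] straight into the SQL text. If DBUtils wraps pymssql (a guess; the snippet only says mssql), the same query can be parameterized. A hedged sketch:

import pymssql

# Hedged sketch: parameterized SQL Server query. The pymssql dependency and
# the connection details are assumptions; credentials are placeholders.
conn = pymssql.connect(server='118.31.239.85', user='...', password='...',
                       database='GAVEL_DMG')
cur = conn.cursor()
cur.execute("select * from DBTABLE where DBTABLE_CPID = %s", (record[1],))
for i, r in enumerate(cur.fetchall()):
    print('\t* ' + str(i + 1) + ' >>>  ', r)
conn.close()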
Example #14
    def stage6(self):
        """
        Post processing, extract RMS from the summary file for corresponding PBCOR_IFTS file
        extract BMIN, BMAJ, BPA from the HISTORY keyword from the PBCOR FITS header and put
        KEY Value pairs in the same PBCOR FITS header using astropy.io
        :return:
        """
        dbutils = DBUtils()
        fits_images_list = glob.glob('/GARUDATA/IMAGING24/CYCLE24/*/FITS_IMAGE/*PBCOR*.FITS')
        # fits_images_list = ['/GARUDATA/IMAGING17/CYCLE17/4575/17_024_04NOV09/FITS_IMAGE/A3376-W.GMRT325.SP2B.PBCOR.FITS']
        # fits_images_list = ['/GARUDATA/IMAGING17/CYCLE17/4572/17_024_03NOV09/FITS_IMAGE/A3376-E.GMRT325.SP2B.PBCOR.FITS']
        counter = 1
        for fits_file in fits_images_list:
            counter += 1
            # fits_file = '/GARUDATA/IMAGING19/CYCLE19/5164/19_085_27DEC10/FITS_IMAGE/1445+099.GMRT325.SP2B.PBCOR.FITS'

            fits_dir = os.path.dirname(fits_file)

            fits_table = fits.open(fits_file)
            fits_header = fits_table[0].header

            data_keys = {}

            source_name = os.path.basename(fits_file).split('.')[0]  # avoid shadowing builtin `object`
            # source_name = "A3376_E"

            # summary_file = glob.glob(fits_dir + '/spam_A3376-E*.summary')
            summary_file = glob.glob(fits_dir + '/spam_' + source_name + '*.summary')
            rms = "NA"
            for each_summary in summary_file:
                if 'DONE' in open(each_summary).read():
                    # print each_summary
                    lines = open(each_summary).readlines()
                    rms = lines[-1].split(' ')[-5]
                    # print rms
                else:
                    # print "Needs to be deleted"
                    if rms == "NA":
                        log_file = each_summary.replace('summary', 'log')
                        lines = open(log_file).readlines()
                        rms = lines[-2].split(' ')[0]
            if rms == "NA":
                rms = 2.11

            print(fits_file)

            if "CYCLE24" in fits_file:
                dir_path = os.path.dirname(os.path.dirname(fits_file))
                observation_no = glob.glob(dir_path+"/*.obslog")[0].split('/')[-1].split('.')[0]
                print(observation_no)
            else:
                observation_no = fits_file.split('/')[4]

            columnKeys = {
                "project_id"
            }

            if observation_no == 'MIXCYCLE':
                mix_path = fits_file.split('/')[4]+'/'+fits_file.split('/')[5]
                mix_sql = "select observation_no from projectobsno where file_path like '%"+mix_path+"%'"
                mix_cycle_data = dbutils.select_gadpu_query(mix_sql)
                observation_no = mix_cycle_data[0][0]

            whereKeys = {
                "observation_no": observation_no
            }
            print(columnKeys, whereKeys)
            project_id = dbutils.select_from_table("projectobsno", columnKeys, whereKeys, 0)
            print(project_id)
            if project_id:
                columnKeys = {
                    "das_scangroup_id",
                    "ltacomb_file"
                }
                whereKeys = {
                    "project_id": project_id,
                }
                result = dbutils.select_from_table("ltadetails", columnKeys, whereKeys, 0)

                print(result)
                print(result[1])

                sql = "select ant_mask, band_mask, calcode, chan_width, corr_version, g.observation_no, " \
                      "date_obs, ddec, dec_2000, dec_date, dra, lsr_vel1, lsr_vel2, lta_time, " \
                      "net_sign1, net_sign2, net_sign3, net_sign4, num_chans, num_pols, onsrc_time, " \
                      "proj_code, qual, ra_2000, ra_date, rest_freq1, rest_freq2, sky_freq1, " \
                      "sky_freq2, source, sta_time from das.scangroup g inner join " \
                      "das.scans s on s.scangroup_id = g.scangroup_id " \
                      "where s.scangroup_id = " + str(result[1]) + " AND source like '" + object + "'"
                scangroup_data = dbutils.select_scangroup_query(sql)

                # print(scangroup_data)

                if scangroup_data:
                    data_keys = {
                        "ANTMASK": scangroup_data[0],
                        "BANDMASK": scangroup_data[1],
                        "CALCODE": scangroup_data[2],
                        "CHANWIDT": scangroup_data[3],
                        "CORRVERS": scangroup_data[4],
                        "OBSNUM": scangroup_data[5],
                        "DATEOBS": str(scangroup_data[6]),
                        "DDEC": scangroup_data[7],
                        "DEC2000": scangroup_data[8],
                        "DECDATE": scangroup_data[9],
                        "DRA": scangroup_data[10],
                        "LSRVEL1": scangroup_data[11],
                        "LSRVEL2": scangroup_data[12],
                        "LTATIME": scangroup_data[13],
                        "NETSIGN1": scangroup_data[14],
                        "NETSIGN2": scangroup_data[15],
                        "NETSIGN3": scangroup_data[16],
                        "NETSIGN4": scangroup_data[17],
                        "NUMCHANS": scangroup_data[18],
                        "NUMPOLS": scangroup_data[19],
                        "ONSRCTIM": scangroup_data[20],
                        "PROJCODE": scangroup_data[21],
                        "QUAL": scangroup_data[22],
                        "RA2000": scangroup_data[23],
                        "RADATE": scangroup_data[24],
                        "RESTFRE1": scangroup_data[25],
                        "RESTFRE2": scangroup_data[26],
                        "SKYFREQ1": scangroup_data[27],
                        "SKYFREQ2": scangroup_data[28],
                        "STATIME": scangroup_data[30],
                        "RMS": float(rms)
                    }

                    # print(data_keys)
                    filename = fits_file
                    hdulist = fits.open(filename, mode='update')
                    header = hdulist[0].header

                    try:
                        history = str(fits_header["HISTORY"]).strip().split(' ')
                        nh = [x for x in history if x]
                        data_keys["BMAJ"] = float(nh[3])
                        data_keys["BMIN"] = float(nh[5])
                        data_keys["BPA"] = float(nh[7])
                        print(history)
                        try:
                            del header['HISTORY']
                        except Exception as exh:
                            print(exh)
                    except Exception as ex:
                        print(ex)
                    try:
                        if fits_header["BMAJ"]:
                            data_keys["BMAJ"] = float(fits_header["BMAJ"])
                            data_keys["BMIN"] = float(fits_header["BMIN "])
                            data_keys["BPA"] = float(fits_header["BPA"])
                    except Exception as ex:
                        print(ex)

                    pbcor_file = os.path.basename(fits_file).split('.')[0]
                    spam_log = glob.glob(os.path.dirname(fits_file) + "/spam_" + pbcor_file + "*.log")
                    spam_log.sort()
                    spam_log = spam_log[0]
                    reading_spam_log = open(spam_log).readlines()
                    bmaj_bmin = []
                    if len(reading_spam_log) > 0:
                        for each_line in reading_spam_log:
                            if "BMAJ" in each_line:
                                bmaj_bmin.append(each_line)
                        bmaj_bmin_data = bmaj_bmin[0].replace('   ', ' ').replace(
                            "  ", " ").replace("= ", "=").split(' ')
                        print(bmaj_bmin_data)
                        for each_key in bmaj_bmin_data:
                            if "BMAJ" in each_key:
                                data_keys["BMAJ"] = float(each_key.split('=')[1])
                            if "BMIN" in each_key:
                                data_keys["BMIN"] = float(each_key.split('=')[1])
                            if "BPA" in each_key:
                                data_keys["BPA"] = float(each_key.split('/')[0].split('=')[1])
                        print(data_keys["BMAJ"], data_keys["BMIN"], data_keys["BPA"])
                        try:
                            for key, value in data_keys.items():
                                print(key, value)
                                header.set(key, value)
                            hdulist.flush()
                        except Exception as ex:
                            print(ex)
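stage6 recovers BMAJ/BMIN/BPA from the SPAM log by chained replace() calls and whitespace splitting, which breaks whenever the spacing shifts. A more tolerant sketch with a regular expression; the KEY=value line format is an assumption taken from the code above:

import re

def parse_beam_params(line):
    # Hedged sketch: pull BMAJ/BMIN/BPA out of a SPAM log line. Assumes the
    # line carries "KEY = value" fragments, as the replace()/split() chain
    # above implies.
    params = {}
    for key in ("BMAJ", "BMIN", "BPA"):
        m = re.search(key + r"\s*=\s*([-+0-9.eE]+)", line)
        if m:
            params[key] = float(m.group(1))
    return params

# e.g. parse_beam_params(bmaj_bmin[0]) in place of the split-based parsing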
Example #15
    def __init__(self, iface: QgisInterface, version: str, plugin_dir: str,
                 isBatch: bool) -> None:
        """Initialise class variables."""
        settings = QSettings()
        if settings.contains('/QSWATPlus/SWATPlusDir'):
            SWATPlusDir = settings.value('/QSWATPlus/SWATPlusDir')
        else:
            SWATPlusDir = Parameters._SWATPLUSDEFAULTDIR
            if os.path.isdir(SWATPlusDir):
                settings.setValue('/QSWATPlus/SWATPlusDir',
                                  Parameters._SWATPLUSDEFAULTDIR)
        if not os.path.isdir(SWATPlusDir):
            QSWATUtils.error(
                '''Cannot find SWATPlus directory, expected to be {0}.
Please use the Parameters form to set its location.'''.format(SWATPlusDir),
                isBatch)
            self.SWATPlusDir = ''
            return
        ## SWATPlus directory
        self.SWATPlusDir = SWATPlusDir
        ## Directory containing QSWAT plugin
        self.plugin_dir = plugin_dir
        ## Databases directory: part of plugin
        # containing template project and reference databases, plus soil database for STATSGO and SSURGO
        self.dbPath = QSWATUtils.join(self.SWATPlusDir, Parameters._DBDIR)
        ## Path of template project database
        self.dbProjTemplate = QSWATUtils.join(self.dbPath, Parameters._DBPROJ)
        ## Path of template reference database
        self.dbRefTemplate = QSWATUtils.join(self.dbPath, Parameters._DBREF)
        ## Directory of TauDEM executables
        self.TauDEMDir = TauDEMUtils.findTauDEMDir(settings, not isBatch)
        ## Path of mpiexec
        self.mpiexecPath = TauDEMUtils.findMPIExecPath(settings)
        proj = QgsProject.instance()
        title = proj.title()
        ## QGIS interface
        self.iface = iface
        ## Stream burn-in depth
        self.burninDepth = proj.readNumEntry(title, 'params/burninDepth',
                                             Parameters._BURNINDEPTH)[0]
        ## Channel width multiplier
        self.channelWidthMultiplier = proj.readDoubleEntry(
            title, 'params/channelWidthMultiplier',
            Parameters._CHANNELWIDTHMULTIPLIER)[0]
        ## Channel width exponent
        self.channelWidthExponent = proj.readDoubleEntry(
            title, 'params/channelWidthExponent',
            Parameters._CHANNELWIDTHEXPONENT)[0]
        ## Channel depth multiplier
        self.channelDepthMultiplier = proj.readDoubleEntry(
            title, 'params/channelDepthMultiplier',
            Parameters._CHANNELDEPTHMULTIPLIER)[0]
        ## Channel depth exponent
        self.channelDepthExponent = proj.readDoubleEntry(
            title, 'params/channelDepthExponent',
            Parameters._CHANNELDEPTHEXPONENT)[0]
        ## reach slope multiplier
        self.reachSlopeMultiplier = proj.readDoubleEntry(
            title, 'params/reachSlopeMultiplier', Parameters._MULTIPLIER)[0]
        ## tributary slope multiplier
        self.tributarySlopeMultiplier = proj.readDoubleEntry(
            title, 'params/tributarySlopeMultiplier',
            Parameters._MULTIPLIER)[0]
        ## mean slope multiplier
        self.meanSlopeMultiplier = proj.readDoubleEntry(
            title, 'params/meanSlopeMultiplier', Parameters._MULTIPLIER)[0]
        ## main length multiplier
        self.mainLengthMultiplier = proj.readDoubleEntry(
            title, 'params/mainLengthMultiplier', Parameters._MULTIPLIER)[0]
        ## tributary length multiplier
        self.tributaryLengthMultiplier = proj.readDoubleEntry(
            title, 'params/tributaryLengthMultiplier',
            Parameters._MULTIPLIER)[0]
        ## upslope HRU drain percent
        self.upslopeHRUDrain = proj.readNumEntry(
            title, 'params/upslopeHRUDrain', Parameters._UPSLOPEHRUDRAIN)[0]
        ## Index of slope group in Layers panel
        self.slopeGroupIndex = -1
        ## Index of landuse group in Layers panel
        self.landuseGroupIndex = -1
        ## Index of soil group in Layers panel
        self.soilGroupIndex = -1
        ## Index of watershed group in Layers panel
        self.watershedGroupIndex = -1
        ## Index of results group in Layers panel
        self.resultsGroupIndex = -1
        ## Index of animation group in Layers panel
        self.animationGroupIndex = -1
        ## Flag showing if using existing watershed
        self.existingWshed = False
        ## Flag showing if using grid model
        self.useGridModel = False
        ## flag to show if using landscape units
        self.useLandscapes = False
        ## flag to show if dividing into left/right/headwater landscape units
        self.useLeftRight = False
        ## Path of DEM raster
        self.demFile = ''
        ## Path of filled DEM raster
        self.felFile = ''
        ## Path of stream burn-in shapefile
        self.burnFile = ''
        ## Path of DEM after burning-in
        self.burnedDemFile = ''
        ## Path of D8 flow direction raster
        self.pFile = ''
        ## Path of D8 flow accumulation raster
        self.ad8File = ''
        ## Path of subbasins raster
        self.basinFile = ''
        ## path of channel basins raster
        self.channelBasinFile = ''
        ## path of channel basins file with lakes masked out
        self.chBasinNoLakeFile = ''
        ## Path of channel raster
        self.srcChannelFile = ''
        ## Path of valleyDepthsFile
        # value at each point in this raster is the drop in metres
        # from the point to where its D8 flow path meets a channel
        # Channel elevations are measured at points adjacent to the channel
        # to avoid problems caused by burning-in
        self.valleyDepthsFile = ''
        ## Path of outlets shapefile
        self.outletFile = ''
        ## path of snapped outlets file
        self.snapFile = ''
        ## Path of outlets shapefile for extra reservoirs and point sources
        self.extraOutletFile = ''
        ## Path of stream shapefile
        self.streamFile = ''
        ## Path of stream shapefile calculated by delineation
        # since streamFile is set to streams from grid when using a grid model
        self.delinStreamFile = ''
        ## Path of channel shapefile
        self.channelFile = ''
        ## Path of subbasins shapefile or grid file when using grids
        self.subbasinsFile = ''
        ## Path of watershed shapefile: shows channel basins.  Not used with grid models.
        self.wshedFile = ''
        ## Path of file like D8 contributing area but with heightened values at subbasin outlets
        self.hd8File = ''
        ## Path of distance to stream outlets raster
        self.distStFile = ''
        ## Path of distance to channel raster
        self.distChFile = ''
        ## Path of slope raster
        self.slopeFile = ''
        ## path of lakes shapefile
        self.lakeFile = ''
        ## Path of slope bands raster
        self.slopeBandsFile = ''
        ## Path of landuse raster
        self.landuseFile = ''
        ## Path of soil raster
        self.soilFile = ''
        ## path of floodplain raster
        self.floodFile = ''
        ## Nodata value for DEM
        self.elevationNoData = 0
        ## DEM horizontal block size
        self.xBlockSize = 0
        ## DEM vertical block size
        self.yBlockSize = 0
        ## Nodata value for basins raster
        self.basinNoData = 0
        ## Nodata value for distance to outlets raster
        self.distStNoData = 0
        ## Nodata value for distance to channel raster
        self.distChNoData = 0
        ## Nodata value for slope raster
        self.slopeNoData = 0
        ## Nodata value for landuse raster
        self.cropNoData = 0
        ## Nodata value for soil raster
        self.soilNoData = 0
        ## Nodata value for floodplain raster
        self.floodNoData = -1
        ## Area of DEM cell in square metres
        self.cellArea = 0.0
        ## channel threshold in square metres
        self.channelThresholdArea = 10000000  # 1000 hectares default
        ## gridSize as count of DEM cells per side (grid model only)
        self.gridSize = 0
        ## list of landuses exempt from HRU removal
        self.exemptLanduses: List[str] = []
        ## table of landuses being split
        self.splitLanduses: Dict[str, Dict[str, float]] = dict()
        ## Elevation bands threshold in metres
        self.elevBandsThreshold = 0
        ## Number of elevation bands
        self.numElevBands = 0
        ## Topology object
        self.topo = QSWATTopology(isBatch)
        projFile = proj.fileName()
        projPath = QFileInfo(projFile).canonicalFilePath()
        pdir, base = os.path.split(projPath)
        ## Project name
        self.projName = os.path.splitext(base)[0]
        ## Project directory
        self.projDir = pdir
        ## QSWAT+ version
        self.version = version
        ## DEM directory
        self.demDir = ''
        ## Landuse directory
        self.landuseDir = ''
        ## Soil directory
        self.soilDir = ''
        ## Landscape directory
        self.landscapeDir = ''
        ## Floodplain directory
        self.floodDir = ''
        ## text directory
        self.textDir = ''
        ## Rasters directory
        self.rastersDir = ''
        ## Shapes directory
        self.shapesDir = ''
        ## Scenarios directory
        self.scenariosDir = ''
        ## Results directory
        self.resultsDir = ''
        ## Plots directory
        self.plotsDir = ''
        ## png directory for storing png images used to create animation videos
        self.pngDir = ''
        ## animation directory for storing animation files
        self.animationDir = ''
        self.createSubDirectories()
        ## path of full lsus shapefile
        self.fullLSUsFile = QSWATUtils.join(self.shapesDir,
                                            Parameters._LSUS1 + '.shp')
        ## path of actual lsus shapefile (after channel mergers)
        self.actLSUsFile = QSWATUtils.join(self.shapesDir,
                                           Parameters._LSUS2 + '.shp')
        ## Path of FullHRUs shapefile
        self.fullHRUsFile = QSWATUtils.join(self.shapesDir,
                                            Parameters._HRUS1 + '.shp')
        ## Path of ActHRUs shapefile
        self.actHRUsFile = QSWATUtils.join(self.shapesDir,
                                           Parameters._HRUS2 + '.shp')
        ## Flag to show if running in batch mode
        self.isBatch = isBatch
        ## Path of project database
        self.db = DBUtils(self.projDir, self.projName, self.dbProjTemplate,
                          self.dbRefTemplate, self.isBatch)
        ## multiplier to turn elevations to metres
        self.verticalFactor = 1
        ## vertical units
        self.verticalUnits = Parameters._METRES
        # positions of sub windows
        ## Position of delineation form
        self.delineatePos = QPoint(0, 100)
        ## Position of HRUs form
        self.hrusPos = QPoint(0, 100)
        ## Position of parameters form
        self.parametersPos = QPoint(50, 100)
        ## Position of landscape form
        self.landscapePos = QPoint(50, 80)
        ## Position of select subbasins form
        self.selectSubsPos = QPoint(50, 100)
        ## Position of select reservoirs form
        self.selectResPos = QPoint(50, 100)
        ## Position of about form
        self.aboutPos = QPoint(50, 100)
        ## Position of elevation bands form
        self.elevationBandsPos = QPoint(50, 100)
        ## Position of split landuses form
        self.splitPos = QPoint(50, 100)
        ## Position of select landuses form
        self.selectLuPos = QPoint(50, 100)
        ## Position of exempt landuses form
        self.exemptPos = QPoint(50, 100)
        ## Position of outlets form
        self.outletsPos = QPoint(50, 100)
        ## Position of select outlets file form
        self.selectOutletFilePos = QPoint(50, 100)
        ## Position of select outlets form
        self.selectOutletPos = QPoint(50, 100)
        ## Position of visualise form
        self.visualisePos = QPoint(0, 100)
        ## open rasters that need to be closed if a memory exception occurs
        self.openRasters: Set[Raster] = set()
        ## will set to choice made when converting from ArcSWAT, if that was how the project file was created
        # 0: Full
        # 1: Existing
        # 2: No GIS
        # NB These values are defined in convertFromArc.py
        self.fromArcChoice = -1
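A minimal, hypothetical sketch of how the exemptLanduses list and splitLanduses table initialised above might be consulted during HRU processing; the helper names is_exempt and split_fractions are illustrative and do not come from QSWAT+ itself:

from typing import Dict, List


def is_exempt(landuse: str, exempt_landuses: List[str]) -> bool:
    """Return True if this landuse is protected from HRU removal."""
    return landuse in exempt_landuses


def split_fractions(landuse: str,
                    split_landuses: Dict[str, Dict[str, float]]) -> Dict[str, float]:
    """Return the {sub-landuse: fraction} mapping for a split landuse,
    or a trivial identity mapping if the landuse is not being split."""
    return split_landuses.get(landuse, {landuse: 1.0})


# Example: WETL split 60/40 into WETF and WETN; URBN exempt from removal.
print(is_exempt('URBN', ['URBN']))                                    # True
print(split_fractions('WETL', {'WETL': {'WETF': 0.6, 'WETN': 0.4}}))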
Beispiel #16
0
    def running_gvfits(self):
        print("Started Stage2: ")

        cycle_id = self.pipeline_configuration()["cycle_id"]

        dbutils = DBUtils()
        spamutils = SpamUtils()
        fileutils = FileUtils()

        currentTimeInSec = time.time()
        current_date_timestamp = datetime.datetime.fromtimestamp(
            currentTimeInSec).strftime('%Y-%m-%d %H:%M:%S')

        columnKeys = {"project_id", "ltacomb_file", "lta_id"}
        whereKeys = {"comments": str(cycle_id)}

        lta_details = dbutils.select_from_table("ltadetails", columnKeys,
                                                whereKeys, None)

        print(lta_details)

        for each_lta in lta_details:
            print(each_lta)
            project_id = each_lta["project_id"]
            lta_file = each_lta["ltacomb_file"]
            lta_id = each_lta["lta_id"]
            columnKeys = {"base_path"}
            whereKeys = {"project_id": project_id}
            lta_path_details = dbutils.select_test_table(
                "projectobsno", columnKeys, whereKeys, 0)
            print(lta_path_details)
            base_path = lta_path_details[0]
            print(base_path)
            uvfits_file = lta_file + '.UVFITS'
            base_lta = base_path + '/' + lta_file
            if os.path.exists(base_lta):
                base_uvfits = base_path + '/' + uvfits_file
                gvfits_status = spamutils.run_gvfits(base_lta, base_uvfits)
                if os.path.exists(base_uvfits):
                    status = str(cycle_id)
                else:
                    status = "failed"

                calibration_data = {
                    "project_id": project_id,
                    "lta_id": lta_id,
                    "uvfits_file": uvfits_file,
                    "status": status,
                    "comments": gvfits_status,
                    "uvfits_size": fileutils.calculalate_file_sizse_in_MB(base_uvfits),
                    "start_time": current_date_timestamp
                }

                dbutils.insert_into_table("calibrationinput", calibration_data,
                                          "calibration_id")

            else:
                project_update_data = {
                    "set": {
                        "status": "failed",
                        "comments": "ltacomb failed"
                    },
                    "where": {
                        "project_id": project_id
                    }
                }
                lta_details_update_data = {
                    "set": {
                        "status": "failed",
                        "comments": "ltacomb failed"
                    },
                    "where": {
                        "lta_id": lta_id
                    }
                }
                dbutils.update_table(project_update_data, "projectobsno")
                dbutils.update_table(lta_details_update_data, "ltadetails")
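The status written to calibrationinput above depends only on whether the UVFITS file appeared on disk after run_gvfits. A minimal sketch of that rule factored into a testable helper; the name derive_status is illustrative, not part of the pipeline:

import os


def derive_status(uvfits_path, cycle_id):
    """Return the cycle id (as a string) if the UVFITS file was produced,
    'failed' otherwise, mirroring the branch in running_gvfits."""
    return str(cycle_id) if os.path.exists(uvfits_path) else "failed"


print(derive_status("/tmp/does_not_exist.UVFITS", 16))  # 'failed'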
Beispiel #17
0
from DBUtils import DBUtils
from welcome import Welcome
from User import User
from Address import Address
from Bank import Bank
from HelpUtils import Utils

DB = DBUtils()
welcome = Welcome()
User = User()
Address = Address()
Bank = Bank()
Utils = Utils()


# Account-opening method
def addUser():
    # Randomly generate an account number
    account = Utils.getRandom()
    User.setUsername(Utils.inputHelp("username"))
    User.setPassword(Utils.inputHelp("password"))
    Address.setCounrry(Utils.inputHelp("country"))
    Address.setProvince(Utils.inputHelp("province"))
    Address.setStreet(Utils.inputHelp("street"))
    Address.setDoor(Utils.inputHelp("door number"))
    # username = input("Please enter your name: ")
    # password = input("Please enter your password (6 digits): ")
    # print("Next, please enter your address details")
    # counrry = input("\tPlease enter your country: ")
    # province = input("\tPlease enter your province: ")
    # street = input("\tPlease enter your street: ")
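The HelpUtils class used above is not shown in the snippet; a plausible minimal sketch of the two methods it relies on (getRandom for account numbers, inputHelp as a prompt wrapper), whose real signatures and behaviour are assumptions:

import random


class Utils:
    def getRandom(self):
        # Assumed: an 8-digit random account number.
        return random.randint(10000000, 99999999)

    def inputHelp(self, field_name):
        # Assumed: a thin prompt wrapper built from the field name.
        return input("Please enter your %s: " % field_name)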
Beispiel #18
0
    def combining_lsb_usb(self):
        print("Started Stage4: ")
        cycle_id = self.pipeline_configuration()["cycle_id"]

        spam.set_aips_userid(11)
        dbutils = DBUtils()
        fileutils = FileUtils()
        status = "failed"
        comments = "combine usb lsb failed"
        # fileutils = FileUtils()
        # query conditions for projectobsno
        columnKeys = {"project_id", "base_path", "observation_no"}
        # NB: cycle_id is hard-coded to 16 here instead of using the
        # configured cycle_id fetched above
        whereKeys = {"isghb": True, "cycle_id": 16, "status": "now"}

        project_data = dbutils.select_from_table("projectobsno", columnKeys,
                                                 whereKeys, 0)
        print(project_data)

        # NB: positional access assumes the column order returned by
        # select_from_table; columnKeys is a set, so this is fragile
        project_id = project_data[1]
        base_path = project_data[2]
        obsno = project_data[0]

        start_time = datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d %H:%M:%S')
        # print(project_id, base_path, obsno)

        # query conditions for calibrationinput
        columnKeys = {"calibration_id", "uvfits_file"}
        whereKeys = {"project_id": project_id, "status": "now"}
        calibration_data = dbutils.select_from_table("calibrationinput",
                                                     columnKeys, whereKeys,
                                                     None)
        print(calibration_data)

        if not calibration_data:
            print(
                "All the data is processed ... OR \n ==> please check the DB for combinelsbusb"
            )
            spam.exit()

        print(len(calibration_data))
        if len(calibration_data) < 2:
            status = "success"
            comments = "single file combinelsbusb not required"
            usb_lsb_file = glob.glob(base_path + "/PRECALIB/*GMRT*.UVFITS")
            if calibration_data:
                projectobsno_update_data = {
                    "set": {
                        "status": status,
                        "comments": comments
                    },
                    "where": {
                        "project_id": project_id
                    }
                }
                print("Updating the projectobsno ... ")
                dbutils.update_table(projectobsno_update_data, "projectobsno")
                calibration_update_data = {
                    "set": {
                        "status": status,
                        "comments": comments
                    },
                    "where": {
                        "calibration_id": calibration_data[0]["calibration_id"]
                    }
                }
                dbutils.update_table(calibration_update_data,
                                     "calibrationinput")
            else:
                projectobsno_update_data = {
                    "set": {
                        "status": "failed",
                        "comments": "Failed Error: Something went wrong"
                    },
                    "where": {
                        "project_id": project_id
                    }
                }
                print("Updating the projectobsno ... ")
                dbutils.update_table(projectobsno_update_data, "projectobsno")
        else:
            print("Values > 2")
            print("*************" + str(os.getcwd()))
            for each_uvfits in calibration_data:
                precalib_files = glob.glob(base_path + "/PRECALIB/*")
                lsb_list = glob.glob(base_path + "/PRECALIB/*_LL_*.UVFITS")
                usb_list = glob.glob(base_path + "/PRECALIB/*_RR_*.UVFITS")

                if len(lsb_list) == 0 or len(usb_list) == 0:
                    print(len(lsb_list), len(usb_list))
                    lsb_list = glob.glob(base_path + "/PRECALIB/*LSB*.UVFITS")
                    usb_list = glob.glob(base_path + "/PRECALIB/*USB*.UVFITS")

                projectobsno_update_data = {
                    "set": {
                        "status": "processing",
                        "comments": "combining_lsb_usb"
                    },
                    "where": {
                        "project_id": project_id
                    }
                }
                dbutils.update_table(projectobsno_update_data, "projectobsno")
                calibration_id = each_uvfits["calibration_id"]
                uvfits_file = each_uvfits["uvfits_file"]
                calibration_update_data = {
                    "set": {
                        "status": "processing",
                        "comments": "combining_lsb_usb",
                        "start_time": start_time
                    },
                    "where": {
                        "calibration_id": calibration_id
                    }
                }
                dbutils.update_table(calibration_update_data,
                                     "calibrationinput")
                print("lsb_list : " + str(len(lsb_list)))
                print("usb_list : " + str(len(usb_list)))
                status = "failed"
                comments = "combining lsb usb"
                if len(lsb_list) == len(usb_list):
                    print(">>>>>>COMBINE_LSB_USB<<<<<<<")
                    usb_list.sort()
                    lsb_list.sort()
                    print(usb_list)
                    print(lsb_list)
                    to_spam = list(zip(usb_list, lsb_list))
                    file_size = 0
                    print(to_spam)
                    for each_pair in to_spam:
                        print("-------------------------")
                        comb = each_pair[0].replace('USB', 'COMB')
                        data = each_pair, comb
                        print("++++++++++++++++" + comb)
                        currentTimeInSec = time.time()
                        fits_comb = comb.split('/')[-1]
                        check_comb_file = glob.glob("fits/" + fits_comb)
                        if not check_comb_file:
                            status, comments = fileutils.run_spam_combine_usb_lsb(
                                data)
                            if status == 'success':
                                status = str(cycle_id)
                            print("__________________________________________")
                            print(glob.glob("fits/*"))
                            print("__________________________________________")
                            end_time = datetime.datetime.fromtimestamp(
                                time.time()).strftime('%Y-%m-%d %H:%M:%S')
                            if not comments:
                                comments = "done combining usb lsb"
                            if glob.glob(comb):
                                file_size = fileutils.calculalate_file_sizse_in_MB(
                                    comb)
                            imagininput_data = {
                                "project_id": project_id,
                                "calibration_id": calibration_id,
                                "calibrated_fits_file": os.path.basename(comb),
                                "file_size": file_size,
                                "start_time": start_time,
                                "end_time": end_time,
                                "comments": "c16 " + comments,
                            }
                            dbutils.insert_into_table("imaginginput",
                                                      imagininput_data,
                                                      "imaging_id")
                            print("-------------------------")
                end_time = datetime.datetime.fromtimestamp(
                    time.time()).strftime('%Y-%m-%d %H:%M:%S')
                calibration_update_data = {
                    "set": {
                        "status": status,
                        "comments": comments,
                        "start_time": start_time,
                        "end_time": end_time
                    },
                    "where": {
                        "calibration_id": calibration_id
                    }
                }
                dbutils.update_table(calibration_update_data,
                                     "calibrationinput")

                projectobsno_update_data = {
                    "set": {
                        "status": status,
                        "comments": comments
                    },
                    "where": {
                        "project_id": project_id
                    }
                }
                dbutils.update_table(projectobsno_update_data, "projectobsno")
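The heart of this stage is pairing the sorted USB and LSB lists positionally and deriving the COMB output name from the USB member. A standalone sketch of just that step; the helper name pair_usb_lsb and the file names are illustrative:

def pair_usb_lsb(usb_list, lsb_list):
    """Sort both sideband lists and pair them by position, deriving the
    combined output name from the USB member, as the loop above does."""
    usb_list = sorted(usb_list)
    lsb_list = sorted(lsb_list)
    return [(usb, lsb, usb.replace('USB', 'COMB'))
            for usb, lsb in zip(usb_list, lsb_list)]


for usb, lsb, comb in pair_usb_lsb(['B_USB_1.UVFITS', 'A_USB_1.UVFITS'],
                                   ['B_LSB_1.UVFITS', 'A_LSB_1.UVFITS']):
    print(usb, '+', lsb, '->', comb)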
Beispiel #19
0
import os
from DBUtils import DBUtils


dbutils = DBUtils()

sql_query = "select p.base_path, p.observation_no, d.file_name from projectobsno p inner " \
            "join dataproducts d on p.project_id = d.project_id where d.file_name like '%PBCOR%'"


jpeg_images_path = '/GARUDATA/jpeg_images/'

data = dbutils.select_gadpu_query(sql_query)
counter = 0
for each_row in data:
    counter += 1
    base_path = each_row[0]
    observation_no = each_row[1]
    jpeg_img_file = each_row[2].replace('.FITS', '.jpeg')
    source_name = jpeg_img_file.split('.')[0]
    jpeg_img_path = base_path + '/FITS_IMAGE/' + jpeg_img_file
    if os.path.exists(jpeg_img_path):
        sql_query = "select distinct scan_id, observation_no, source from das.scans " \
                    "where observation_no = " + str(observation_no) + \
                    " and source like '" + source_name + "'"
        scans_data = dbutils.select_query(sql_query)
        if scans_data:
            cycle_dir = base_path.split('/')[3]
            images_path = jpeg_images_path + cycle_dir + '/' + str(observation_no)
            if not os.path.exists(images_path):
                os.system("mkdir -p " + images_path)
            os.system("cp " + jpeg_img_path + " " + images_path + "/")
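The mkdir/cp shell-outs above can be replaced by the standard library; a small sketch of the same copy step using os.makedirs and shutil, assuming plain regular files:

import os
import shutil


def copy_jpeg(jpeg_img_path, images_path):
    """Create the target directory if needed and copy the image into it."""
    if not os.path.exists(images_path):
        os.makedirs(images_path)
    shutil.copy(jpeg_img_path, images_path)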
Beispiel #20
0
    def updating_fits_header(self):
        """
        Post processing, extract RMS from the summary file for corresponding PBCOR_IFTS file
        extract BMIN, BMAJ, BPA from the HISTORY keyword from the PBCOR FITS header and put
        KEY Value pairs in the same PBCOR FITS header using astropy.io
        :return:
        """
        print("Started Stage6: ")
        cycle_id = self.pipeline_configuration()["cycle_id"]
        fits_images_list_cfg = self.pipeline_configuration()["fits_images_list"]

        dbutils = DBUtils.DBUtils()
        fits_images_list = glob.glob(fits_images_list_cfg)
        # fits_images_list = glob.glob('/GARUDATA/IMAGING16/CYCLE16/*/*/FITS_IMAGE/*PBCOR*.FITS')
        # fits_images_list = ['/GARUDATA/IMAGING19/CYCLE19/5164/19_085_27DEC10/FITS_IMAGE/1445+099.GMRT325.SP2B.PBCOR.FITS']
        counter = 1
        for fits_file in fits_images_list:
            counter += 1
            # fits_file = '/GARUDATA/IMAGING19/CYCLE19/5164/19_085_27DEC10/FITS_IMAGE/1445+099.GMRT325.SP2B.PBCOR.FITS'

            fits_dir = os.path.dirname(fits_file)

            fits_table = fits.open(fits_file)
            fits_header = fits_table[0].header

            data_keys = {}

            source_name = os.path.basename(fits_file).split('.')[0]

            summary_file = glob.glob(fits_dir + '/spam_' + source_name +
                                     '*.summary')
            rms = "NA"
            for each_summary in summary_file:
                if 'DONE' in open(each_summary).read():
                    # print each_summary
                    lines = open(each_summary).readlines()
                    rms = lines[-1].split(' ')[-5]
                    # print rms
                else:
                    # print "Needs to be deleted"
                    if rms == "NA":
                        log_file = each_summary.replace('summary', 'log')
                        lines = open(log_file).readlines()
                        rms = lines[-2].split(' ')[0]
            if rms == "NA":
                rms = 2.11

            observation_no = fits_file.split('/')[4]

            columnKeys = {"project_id"}
            whereKeys = {"observation_no": observation_no}

            project_id = dbutils.select_from_table("projectobsno", columnKeys,
                                                   whereKeys, 0)

            columnKeys = {"das_scangroup_id", "ltacomb_file"}
            whereKeys = {
                "project_id": project_id,
            }
            result = dbutils.select_from_table("ltadetails", columnKeys,
                                               whereKeys, 0)

            sql = "select ant_mask, band_mask, calcode, chan_width, corr_version, g.observation_no, " \
                  "date_obs, ddec, dec_2000, dec_date, dra, lsr_vel1, lsr_vel2, lta_time, " \
                  "net_sign1, net_sign2, net_sign3, net_sign4, num_chans, num_pols, onsrc_time, " \
                  "proj_code, qual, ra_2000, ra_date, rest_freq1, rest_freq2, sky_freq1, " \
                  "sky_freq2, source, sta_time from das.scangroup g inner join " \
                  "das.scans s on s.scangroup_id = g.scangroup_id " \
                  "where s.scangroup_id = " + str(result[1]) + " AND source like '" + object + "'"
            scangroup_data = dbutils.select_scangroup_query(sql)

            if scangroup_data:
                data_keys = {
                    "ANTMASK": scangroup_data[0],
                    "BANDMASK": scangroup_data[1],
                    "CALCODE": scangroup_data[2],
                    "CHANWIDT": scangroup_data[3],
                    "CORRVERS": scangroup_data[4],
                    "OBSNUM": scangroup_data[5],
                    "DATEOBS": str(scangroup_data[6]),
                    "DDEC": scangroup_data[7],
                    "DEC2000": scangroup_data[8],
                    "DECDATE": scangroup_data[9],
                    "DRA": scangroup_data[10],
                    "LSRVEL1": scangroup_data[11],
                    "LSRVEL2": scangroup_data[12],
                    "LTATIME": scangroup_data[13],
                    "NETSIGN1": scangroup_data[14],
                    "NETSIGN2": scangroup_data[15],
                    "NETSIGN3": scangroup_data[16],
                    "NETSIGN4": scangroup_data[17],
                    "NUMCHANS": scangroup_data[18],
                    "NUMPOLS": scangroup_data[19],
                    "ONSRCTIM": scangroup_data[20],
                    "PROJCODE": scangroup_data[21],
                    "QUAL": scangroup_data[22],
                    "RA2000": scangroup_data[23],
                    "RADATE": scangroup_data[24],
                    "RESTFRE1": scangroup_data[25],
                    "RESTFRE2": scangroup_data[26],
                    "SKYFREQ1": scangroup_data[27],
                    "SKYFREQ2": scangroup_data[28],
                    "STATIME": scangroup_data[30],
                    "RMS": float(rms)
                }

                # print(data_keys)
                filename = fits_file
                hdulist = fits.open(filename, mode='update')
                header = hdulist[0].header

                try:
                    history = str(fits_header["HISTORY"]).strip().split(' ')
                    nh = [x for x in history if x]
                    data_keys["BMAJ"] = float(nh[3])
                    data_keys["BMIN"] = float(nh[5])
                    data_keys["BPA"] = float(nh[7])

                    try:
                        del header['HISTORY']
                    except Exception as exh:
                        print(exh)
                except Exception as ex:
                    print(ex)

                if fits_header["BMAJ"]:
                    data_keys["BMAJ"] = float(fits_header["BMAJ"])
                    data_keys["BMIN"] = float(fits_header["BMIN "])
                    data_keys["BPA"] = float(fits_header["BPA"])

                for key, value in data_keys.items():
                    print(key, value)
                    header.set(key, value)
                hdulist.flush()
                print(counter)
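The beam parameters are recovered above by whitespace-splitting the HISTORY card and picking fixed token positions. A standalone sketch of that parsing; the sample HISTORY text is illustrative, not taken from a real file:

def beam_from_history(history_text):
    """Split an AIPS-style HISTORY entry on whitespace and pull out
    BMAJ, BMIN and BPA by token position, as updating_fits_header does."""
    tokens = [t for t in history_text.strip().split(' ') if t]
    return {
        "BMAJ": float(tokens[3]),
        "BMIN": float(tokens[5]),
        "BPA": float(tokens[7]),
    }


sample = "AIPS CLEAN BMAJ=  1.234E-03 BMIN=  5.678E-04 BPA= -45.00"
print(beam_from_history(sample))  # {'BMAJ': 0.001234, 'BMIN': 0.0005678, 'BPA': -45.0}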
Beispiel #21
0
    def __init__(self):
        self.__pgdb = DBUtils()

        # Caches used for some functions.
        self.__gen_data_cache = {}