
Exporting Data from Hadoop HDFS to a Relational Database with Python

Exporting from HDFS to a relational database (MySQL, Oracle, PostgreSQL) with Python

This article walks through a complete workflow for exporting data from Hadoop HDFS and loading it into a relational database with the DataX tool, plus an alternative multi-threaded Python loader.

Steps

1. Define parameters and variables

The wrapper script takes five positional arguments: the export SQL, the source table, the target data source name, the target table, and a flag that controls truncation.

sql=$1                  # SQL statement that selects the data to export
s_tablename=$2          # source table name
ds_name=$3              # target database (data source) name
t_tablename=$4          # target table name
temptable="h2o_"`date +%s%N | md5sum | head -c 16`  # temp table name derived from a timestamp hash
filename=${s_tablename}_${temptable}  # file name
path="hdfs://prdhdfs/tmp/hdfs_to_rdb/$filename/"   # HDFS path
local_path="/data02/dcadmin/scripts/dataos_scripts/data_exp"  # local script path
flag=$5 # flag that decides whether to truncate the target table

2. Construct the SQL and submit it to Hive

The incoming $sql is expected to hold two statements separated by a semicolon (for example a set/use statement followed by the actual select); the second part is wrapped in an insert overwrite directory ... stored as orc clause so that Hive writes the result set to the HDFS path:

echo "$sql"
sql1=`echo "$sql"|cut -d ";" -f2`  # 截取分号后的部分
sql0=`echo "$sql"|cut -d ";" -f1`  # 截取分号前的部分
sql0="$sql0;insert overwrite directory '${path}' stored as orc $sql1"  # 构建最终的sql
echo "$sql0"

kinit -kt /data02/dcadmin/keytab_shengchan/dcadmin.keytab dcadmin@SC.COM
beeline -u "jdbc:hive2://prdnn1.yxbdprd.sc.ctc.com:2181, ... ,prddb1.yxbdprd.sc.ctc.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" -e "set tez.queue.name=offline;$sql0 distribute by rand()"

3. Fetch the target database connection info and parse it into variables

The connection details for the target database are looked up in a PostgreSQL metadata table and parsed into shell variables. Note that psql -t prints the columns separated by | characters, so after awk's whitespace splitting the useful values land in fields $1, $3, $5 and $7 (the even-numbered fields are the | separators).

re=$(PGPASSWORD=... psql -h 10.251.110.104 -p 18921 -U dacp -d dacp  -t <<EOF
select case when ds_type = 'mysql'  then concat ('jdbc:mysql://'     ,ds_inst_loc,'/',(ds_conf::json ->>'physicaldbname'),'?characterEncoding=utf-8') 
            when ds_type = 'oracle' then ds_conf::json ->> 'url' 
            when ds_type = 'pg'     then concat ('jdbc:postgresql://',ds_inst_loc,'/',(ds_conf::json ->>'physicaldbname')) end jdbc_url
      ,ds_acct
      ,ds_auth
      ,case when ds_type = 'mysql'  then 'mysqlwriter' 
            when ds_type = 'oracle' then 'oraclewriter' 
            when ds_type = 'pg'     then 'postgresqlwriter' end ds_type
from dacp_dev.dacp_meta_datasource
where ds_type in ('mysql', 'oracle', 'pg')
  and upper(trim(ds_name)) = upper(trim('$ds_name'))
EOF
)
eval $(echo $re| awk '{printf("jdbc_url=%s; ds_acct=%s; ds_auth=%s; ds_type=%s",$1,$3,$5,$7)}')

4. Obtain the target database password

The stored password is decrypted by running a small Java program:

pw=`java -Dpwd=${ds_auth} -jar $local_path/aescipher-1.0.jar`

5. Build the pre-SQL statement

Depending on the flag variable, either truncate the target table first or fall back to an always-false select that leaves it untouched:

presql="select * from $t_tablename where 1=-1"
if [ "$flag" = "t" ];then
 presql="truncate table $t_tablename"
fi
echo "presql=$presql"

6. Export the data and load it into the target database

DataX performs the actual load from HDFS into the relational database:

python $local_path/datax/bin/datax.py -p "-Dpath=$path -Dwriter=$ds_type -Drdb_user=$ds_acct -Drdb_pass=\"$pw\" -Drdb_jdbc=\"$jdbc_url\" -Drdb_table=$t_tablename -Dpresql=\"$presql\"" $local_path/hdfs_to_rdb.json
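The job template hdfs_to_rdb.json itself is not shown in the article. DataX substitutes ${...} placeholders in the job file with the values passed via -p "-D...", so — as a sketch only, assuming the template follows the standard DataX hdfsreader/writer layout — it might look roughly like this (the exact file in the author's environment may differ):

{
  "job": {
    "setting": { "speed": { "channel": 5 } },
    "content": [{
      "reader": {
        "name": "hdfsreader",
        "parameter": {
          "path": "${path}",
          "defaultFS": "hdfs://prdhdfs",
          "fileType": "orc",
          "column": ["*"]
        }
      },
      "writer": {
        "name": "${writer}",
        "parameter": {
          "username": "${rdb_user}",
          "password": "${rdb_pass}",
          "column": ["*"],
          "preSql": ["${presql}"],
          "connection": [{
            "jdbcUrl": "${rdb_jdbc}",
            "table": ["${rdb_table}"]
          }]
        }
      }
    }]
  }
}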

7. The Python script in detail

Beyond the shell wrapper, a substantial Python script handles the data conversion and transfer. The key parts:

1. Initialization and dependencies

Load the required packages and initialize variables (the script targets Python 2, as the commands module and the later reload(sys) idiom show).

import time
import datetime
import os
import sys
import threadpool
import commands          # Python 2 standard library
import calendar
import random
import pymssql
import pymysql
import cx_Oracle
import psycopg2
import socket
from pyhdfs import HdfsClient
from hashlib import md5

# other initialization settings

2. Connect to a database and execute SQL

Two helper functions connect to a database and execute SQL — one returns query results, the other commits changes:

def connect_database_to_select(conn, sql):
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
        conn.commit()
        return result
    except Exception as e:
        print('SQL failed: {}, statement: {}'.format(str(e), sql))
        sys.exit(2)
    finally:
        cursor.close()

def connect_database_to_commit(exe_type, conn, sql, insert_list):
    cursor = conn.cursor()
    try:
        if exe_type.lower() in ('delete', 'insert'):
            cursor.execute(sql)
            conn.commit()
        elif exe_type.lower() == 'insertmany':
            cursor.executemany(sql, insert_list)
            conn.commit()
    except Exception as e:
        print('SQL failed: {}, statement: {}'.format(str(e), sql))
        sys.exit(2)
    finally:
        cursor.close()
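For illustration, these helpers would be called like this (the connection parameters and table name below are hypothetical, not from the article):

import psycopg2

# hypothetical target database, for demonstration only
conn = psycopg2.connect(host='10.0.0.1', port=5432, user='demo',
                        password='demo', database='demo')
rows = connect_database_to_select(conn, 'select count(*) from demo_table')
print(rows[0][0])                       # first column of the first row
connect_database_to_commit('insert', conn, 'truncate table demo_table', '')
conn.close()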

3. Export processing

Run the export SQL against Hive and stage the result files:

def produce_exe_data(sql, s_tablename):
    global local_path_name
    local_path_01 = local_path_list[random.randrange(len(local_path_list))] + '/dataos_exp'
    local_path_name = "h2o_{0}_{1}".format(s_tablename, get_md5_str()).lower()
    local_path = local_path_01 + '/' + local_path_name
    if os.path.exists(local_path):
        cmd = 'rm -rf {}'.format(local_path)
        exe_system_cmd(cmd)
    os.mkdir(local_path)

    hdfs_path = "hdfs://prdhdfs/tmp/hdfs_to_rdb/{}".format(local_path_name)
    sql = sql.strip().strip(';')
    sql_list = sql.split(';')

    hive_conn = hive_connect()
    compress_sql = 'set hive.exec.compress.output=false'
    connect_database_to_commit('insert', hive_conn, compress_sql, '')

    for i in range(len(sql_list)):
        sql_str = sql_list[i]
        if i == len(sql_list)-1:      # wrap the last statement in insert overwrite directory
            sql_str='''insert overwrite directory '{0}' 
         row format delimited fields terminated by '\\u0001' collection items terminated by '\\n' map keys terminated by ':' 
         {1} '''.format(hdfs_path, sql_str)
        connect_database_to_commit('insert', hive_conn, sql_str, '')

    if hive_conn:
        hive_conn.close()

    cmd = ''' hdfs dfs -get {}/* {} '''.format(hdfs_path, local_path)
    exe_system_cmd(cmd)
    return local_path, hdfs_path
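To make the wrapping concrete, here is a minimal reproduction of what the last statement looks like by the time it reaches Hive (the path and query are hypothetical):

hdfs_path = "hdfs://prdhdfs/tmp/hdfs_to_rdb/h2o_orders_0123456789abcdef"   # hypothetical
sql_str   = "select id, amount from dwh.orders"                            # hypothetical

wrapped = '''insert overwrite directory '{0}' 
 row format delimited fields terminated by '\\u0001' collection items terminated by '\\n' map keys terminated by ':' 
 {1} '''.format(hdfs_path, sql_str)
print(wrapped)   # the HQL actually submitted over the Hive connection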

4. Multi-threaded data transfer

A thread pool speeds up the transfer:

def thread_exe_exchange_data(g_tablename, flag, local_path, hdfs_path):
    global rdb_conn
    rdb_conn = get_rdb_database_conn()

    if flag.upper() == 'T':
        presql = 'truncate table {}'.format(g_tablename)
        connect_database_to_commit('insert', rdb_conn, presql, '')

    if ds_type.lower() == 'oracle':
        global oracle_table_field       # Oracle table structure
        oracle_table_field = get_oracle_table_fields()

        global ora_dir
        localtime = str(time.strftime("%Y%m%d", time.localtime()))
        ora_dir = "/data03/datafile/sqlldrdata/{0}/".format(localtime)
        if not os.path.exists(ora_dir):
            os.mkdir(ora_dir)

    file_lists = os.listdir(local_path)
    global exp_num_list
    global log_list
    global exception_list
    exp_num_list = []
    log_list = []
    exception_list = []

    thread_list = []
    for file_name in file_lists:
        thread_list.append(local_path + '/' + file_name)

    pool = threadpool.ThreadPool(5)
    requests = threadpool.makeRequests(exchange_data, thread_list)
    [pool.putRequest(req) for req in requests]
    pool.wait()

    if exception_list:
        delete_local_path(local_path, hdfs_path)
        sys.exit(2)

    print('Export finished, total rows exported: {}'.format(sum(exp_num_list)))
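The threading relies on the third-party threadpool package. A minimal, self-contained sketch of the same pattern (with a stand-in worker instead of exchange_data):

import threadpool

results = []                            # shared result list, as in the script above

def load_one_file(path):                # stand-in for exchange_data
    results.append('loaded ' + path)

pool = threadpool.ThreadPool(5)         # five concurrent workers
reqs = threadpool.makeRequests(load_one_file, ['/tmp/f1', '/tmp/f2', '/tmp/f3'])
[pool.putRequest(req) for req in reqs]
pool.wait()                             # block until every file is processed
print(results)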

The complete scripts (the shell wrapper, then the Python program)

#!/bin/bash
sql=$1                  # SQL that selects the data to export
s_tablename=$2  # source table
ds_name=$3              # target database (data source) name
t_tablename=$4  # target table
temptable="h2o_"`date +%s%N | md5sum | head -c 16`      # temp name derived from a timestamp hash
filename=${s_tablename}_${temptable}    # file name
path="hdfs://prdhdfs/tmp/hdfs_to_rdb/$filename/"
local_path="/data02/dcadmin/scripts/dataos_scripts/data_exp"
flag=$5 # t = truncate the target table first
#hadoop fs -mkdir $path
# sql0 holds the SQL to execute
echo "$sql"
sql1=`echo "$sql"|cut -d ";" -f2`
sql0=`echo "$sql"|cut -d ";" -f1`
sql0="$sql0;insert overwrite directory '${path}' stored as orc $sql1"
echo "$sql0"
# submit the HQL to Hive
kinit -kt /data02/dcadmin/keytab_shengchan/dcadmin.keytab dcadmin@SC.COM
#beeline <<EOF
#!connect jdbc:hive2://devdataosambari:2181,devdataosnn1:2181,devdataosnn2:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2


#$sql0
beeline -u "jdbc:hive2://prdnn1.yxbdprd.sc.ctc.com:2181,prdnn2.yxbdprd.sc.ctc.com:2181,prdrm1.yxbdprd.sc.ctc.com:2181,prddb2.yxbdprd.sc.ctc.com:2181,prddb1.yxbdprd.sc.ctc.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" -e "set tez.queue.name=offline;$sql0 distribute by rand()"
# look up the target data source connection info
#eval $(mysql -h 10.251.88.71 -udacp -pdacp123456 dacp_dev -e "select case when ds_type = 'mysql' then concat('jdbc:mysql://', ds_inst_loc, '/',json_unquote(json_extract(ds_conf,'$.physicaldbname')),'?characterencoding=utf-8')
#when ds_type = 'oracle' then concat('jdbc:oracle:thin:@', ds_inst_loc, '/',json_unquote(json_extract(ds_conf,'$.physicaldbname')))
#when ds_type = 'pg' then concat('jdbc:postgresql://', ds_inst_loc, '/',json_unquote(json_extract(ds_conf,'$.physicaldbname'))) end jdbc_url,
#ds_acct, ds_auth, case when ds_type = 'mysql' then 'mysqlwriter' when ds_type = 'oracle' then 'oraclewriter' when ds_type = 'pg' then 'postgresqlwriter' end ds_type
#from dacp_meta_datasource 
#where ds_type in ('mysql','oracle','pg') 
#and ds_name = '$ds_name'" | awk 'NR==2 {printf("jdbc_url=%s; ds_acct=%s; ds_auth=%s; ds_type=%s",$1,$2,$3,$4)}')

re=$(PGPASSWORD=jxfgckv9gjw2ohs3 psql -h 10.251.110.104 -p 18921 -U dacp -d dacp  -t <<EOF
select case when ds_type = 'mysql'  then concat ('jdbc:mysql://'     ,ds_inst_loc,'/',(ds_conf::json ->>'physicaldbname'),'?characterEncoding=utf-8') 
            when ds_type = 'oracle' then ds_conf::json ->> 'url' 
            when ds_type = 'pg'     then concat ('jdbc:postgresql://',ds_inst_loc,'/',(ds_conf::json ->>'physicaldbname')) end jdbc_url
      ,ds_acct
      ,ds_auth
      ,case when ds_type = 'mysql'  then 'mysqlwriter' 
            when ds_type = 'oracle' then 'oraclewriter' 
            when ds_type = 'pg'     then 'postgresqlwriter' end ds_type
from dacp_dev.dacp_meta_datasource
where ds_type in ('mysql', 'oracle', 'pg')
  and upper(trim(ds_name)) = upper(trim('$ds_name'))
EOF
)
eval $(echo $re| awk '{printf("jdbc_url=%s; ds_acct=%s; ds_auth=%s; ds_type=%s",$1,$3,$5,$7)}')

#eval $(java -jar /data01/etl/scripts/exec_aes.jar $ds_auth | awk ' {printf("pw=%s;",$1)}')
#pw=`java -jar $local_path/exec_aes.jar $ds_auth`
pw=`java -Dpwd=${ds_auth} -jar $local_path/aescipher-1.0.jar`

presql="select * from $t_tablename where 1=-1"
if [ "$flag" = "t" ];then
 presql="truncate table $t_tablename"
fi
echo "presql=$presql"

python $local_path/datax/bin/datax.py -p "-Dpath=$path -Dwriter=$ds_type -Drdb_user=$ds_acct -Drdb_pass=\"$pw\" -Drdb_jdbc=\"$jdbc_url\" -Drdb_table=$t_tablename -Dpresql=\"$presql\"" $local_path/hdfs_to_rdb.json

# -*- coding:utf-8 -*-

import time
import datetime
import os
import threadpool
import commands          # Python 2 standard library
import calendar
import random
import pymssql
import pymysql
import cx_Oracle
import psycopg2
import socket
from pyhdfs import HdfsClient
from hashlib import md5
import sys
reload(sys)              # Python 2 idiom for resetting the default encoding
sys.setdefaultencoding('utf8')
sys.path.append('/data02/dcadmin/scripts/common')
from connect_postgresql import postgresql_connect
from connect_hive import hive_connect

pg_conn_str='dataos_71_pg_dev'



# local disk directories; a staging directory is picked from these at random
local_path_list=['/data01','/data02','/data03','/data04','/data05']


def close_datadb_conn():
    if rdb_conn:
        rdb_conn.close()


def connect_database_to_select(conn,sql):
    # print('\r\nExecuting SQL: {}'.format(sql))
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
        #cursor.execute(sql.decode('utf-8').encode('gbk'))
        result = cursor.fetchall()
        conn.commit()
        return result
    except Exception as e:
        print('SQL failed: {}, statement: {}'.format(str(e),sql))
        sys.exit(2)
    finally:
        cursor.close()

def connect_database_to_commit(exe_type,conn,sql,insert_list):
    # print('\r\nExecuting SQL: {}'.format(sql))
    cursor = conn.cursor()
    try:
        if exe_type.lower() in ('delete','insert'):
            cursor.execute(sql)
            conn.commit()
            print('Executing SQL: {}'.format(sql))
        elif exe_type.lower()=='insertmany':
            cursor.executemany(sql, insert_list)
            conn.commit()
    except Exception as e:
        print('SQL failed: {}, statement: {}'.format(str(e),sql))
        print(sql)
        sys.exit(2)
    finally:
        cursor.close()

# run a system command
def exe_system_cmd(cmd):
    status,output=commands.getstatusoutput(cmd)
    if status!=0:
        print('Command {} failed, please check!'.format(cmd))
        print('Failure log: {}'.format(output))
        sys.exit(2)
    return output


# return an md5 string
def get_md5_str():
    # current timestamp
    ts = calendar.timegm(time.gmtime())
    md5_str=md5(str(ts).encode(encoding='utf-8')).hexdigest()
    return md5_str


# validate the input parameters
def judge_input_parameters_num():
    if len(sys.argv)!=6:
        print('Bad arguments, please check!')
        print(sys.argv)
        sys.exit(2)
    else:
        sql         =sys.argv[1]            # export SQL
        s_tablename =sys.argv[2]            # source table name
        ds_name     =sys.argv[3]            # target database
        g_tablename =sys.argv[4]            # target table
        flag        =sys.argv[5]            # a: append, t: truncate
    return sql,s_tablename,ds_name,g_tablename,flag


# run the SQL statements and generate the HDFS files
def produce_exe_data(sql,s_tablename):
    global local_path_name
    # 1. create the local folder
    # pick one of the disk directories at random: '/data01','/data02','/data03','/data04','/data05'
    local_path_01 = local_path_list[random.randrange(len(local_path_list))]+'/dataos_exp'     # e.g. /data01/dataos_exp
    local_path_name="h2o_{0}_{1}".format(s_tablename,get_md5_str()).lower()
    # local_path_name='h2o_app_hub_resource_value_level_d_e6963bad13299e939a3a4cc2b2a26a47'
    local_path=local_path_01+'/'+local_path_name
    if os.path.exists(local_path):
        cmd='rm -rf {}'.format(local_path)
        exe_system_cmd(cmd)
    os.mkdir(local_path)

    # the HDFS folder
    hdfs_path="hdfs://prdhdfs/tmp/hdfs_to_rdb/{}".format(local_path_name)

    # clean the SQL: strip surrounding whitespace, then surrounding semicolons
    sql=sql.strip().strip(';')
    sql_list=sql.split(';')
    # execute the split statements in order
    hive_conn=hive_connect()        # connect to production Hive
    compress_sql='set hive.exec.compress.output=false'
    print('Executing SQL: {}'.format(compress_sql))
    connect_database_to_commit('insert',hive_conn,compress_sql,'')
    for i in range(len(sql_list)):
        sql_str=sql_list[i]
        if i==len(sql_list)-1:      # wrap the last statement in insert overwrite directory
            sql_str='''insert overwrite directory '{0}' 
         row format delimited fields terminated by '\\u0001' collection items terminated by '\\n' map keys terminated by ':' 
         {1} '''.format(hdfs_path,sql_str)
        print('Executing SQL: {}'.format(sql_str))
        connect_database_to_commit('insert',hive_conn,sql_str,'')
    # close the Hive connection
    if hive_conn:
        hive_conn.close()
    # pull the files from hdfs_path down to local_path
    cmd=''' hdfs dfs -get {}/* {} '''.format(hdfs_path,local_path)
    exe_system_cmd(cmd)
    print('Files fetched. Host: {}, local staging folder: {}'.format(socket.gethostname(),local_path))
    return local_path,hdfs_path
# get the connection info for the target database
def get_rdb_conn_msg(ds_name):
    global ds_type
    global ds_acct
    global ds_auth
    global host
    global port
    global database
    global jdbc_url
    sql='''
        select ds_name
              ,ds_type
              ,ds_acct
              ,ds_auth
              ,split_part(ds_inst_loc,':',1) as host
              ,case when split_part(ds_inst_loc,':',2)='' and ds_type='oracle' then '1521' else split_part(ds_inst_loc,':',2) end as port
              ,case when lower(ds_type)='oracle' then split_part(replace(replace(replace(ds_conf::json->>'url','jdbc:oracle:thin:@',''),':1521',''),'/',':'),':',2) else ds_conf::json->>'physicaldbname' end as database
              ,case when ds_type = 'mysql'  then concat ('jdbc:mysql://'     ,ds_inst_loc,'/',(ds_conf::json ->>'physicaldbname'),'?characterEncoding=utf-8')
                    when ds_type = 'oracle' then ds_conf::json ->>'url'
                    when ds_type = 'pg'     then concat ('jdbc:postgresql://',ds_inst_loc,'/',(ds_conf::json ->>'physicaldbname')) end as jdbc_url
        from dacp_dev.dacp_meta_datasource
        where ds_type in ('mysql', 'oracle', 'pg')
          and upper(trim(ds_name)) = upper(trim('{}')) '''.format(ds_name)
    pg_conn=postgresql_connect(pg_conn_str)
    results=connect_database_to_select(pg_conn,sql)
    # print(results)
    if not results:
        print('No connection info found for the data source, please check. ds_name: {}'.format(ds_name))
        sys.exit(2)
    # close the metadata connection
    if pg_conn:
        pg_conn.close()
    # decrypt the password
    cmd='''java -Dpwd='{0}' -jar /data02/dcadmin/scripts/common/aescipher-1.0.jar'''.format(results[0][3])
    pw=exe_system_cmd(cmd).replace('\r','').replace('\n','')
    ds_type = results[0][1]
    ds_acct = results[0][2]
    ds_auth = pw
    host    = results[0][4]
    port    = int(results[0][5])
    database= results[0][6]
    jdbc_url= results[0][7]


# determine the target database type and return a connection
def get_rdb_database_conn():
    dbms_conn=None
    try:
        if ds_type.upper()=='SQLSERVER':
            dbms_conn = pymssql.connect(host=host  , user=ds_acct, password=ds_auth, port=port, database=database, charset='utf8')
        elif ds_type.upper()=='MYSQL':
            dbms_conn = pymysql.connect(host=host  , user=ds_acct, passwd=ds_auth  , port=port, database=database, charset='utf8', local_infile=True)
        elif ds_type.upper()=='ORACLE':
            listener = '{0}:{1}/{2}'.format(host,port,database)
            print('listener:{}'.format(listener))
            dbms_conn = cx_Oracle.connect(ds_acct,ds_auth,listener,encoding='utf-8')
        elif ds_type.upper() in ('POSTGRESQL','PG'):
            dbms_conn = psycopg2.connect(host=host , user=ds_acct, password=ds_auth  , port=port, database=database, client_encoding='utf8')
        else:
            print("Unknown target database type {}, please check!".format(ds_type.upper()))
            sys.exit(2)
    except Exception as e:
        print('Failed to connect to {0} database {1}, please check!'.format(ds_type,ds_name))
        print('Error log: {}'.format(e))
        print(host)
        print(ds_acct)
        print(ds_auth)
        print(port)
        print(database)
        sys.exit(2)
    return dbms_conn


def thread_exe_exchange_data(g_tablename,flag,local_path,hdfs_path):
    global rdb_conn
    rdb_conn=get_rdb_database_conn()
    # run the pre-SQL
    if flag.upper()=='T':
        presql='truncate table {}'.format(g_tablename)
        print('Executing SQL: {}'.format(presql))
        connect_database_to_commit('insert',rdb_conn,presql,'')
    # fetch the Oracle table structure
    if ds_type.lower() == 'oracle':
        global oracle_table_field       # Oracle table structure
        oracle_table_field=get_oracle_table_fields()

        # directory for the ctl, bad and log files
        global ora_dir
        localtime = str(time.strftime("%Y%m%d", time.localtime()))
        ora_dir = "/data03/datafile/sqlldrdata/{0}/".format(localtime)
        if not os.path.exists(ora_dir):
            os.mkdir(ora_dir)

    # file list
    file_lists=os.listdir(local_path)

    # multi-threaded load
    global exp_num_list     # per-file row counts
    global log_list         # per-thread log messages
    global exception_list   # per-thread exception messages
    exp_num_list  =[]
    log_list      =[]
    exception_list=[]

    thread_list=[]          # thread tasks
    for file_name in file_lists:
        thread_list.append(local_path+'/'+file_name)
    # create the thread pool
    pool=threadpool.ThreadPool(5)
    # build the request list
    requests = threadpool.makeRequests(exchange_data,thread_list)
    [pool.putRequest(req) for req in requests]
    pool.wait()
    # handle exceptions
    if exception_list:
        # the load failed, clean up the staged files
        delete_local_path(local_path,hdfs_path)
        print('Load failed, exception log:')
        for except_msg in exception_list:
            print(except_msg)
        sys.exit(2)

    # print the per-thread logs
    # log_list.sort()
    # for log in log_list:
    #     print(log)

    # print the result
    print('Export finished, total rows exported: {}'.format(sum(exp_num_list)))



# fetch the Oracle table structure
def get_oracle_table_fields():
    sql = '''
             select column_name || suffix as aa
               from (select a.column_name
                           ,a.column_id
                           ,case when upper(a.data_type) like '%DATE%'      then ' date "yyyy-mm-dd hh24:mi:ss"'
                                 when upper(a.data_type) like '%TIMESTAMP%' then ' date "yyyy-mm-dd hh24:mi:ss.ff"'
                                 when upper(a.data_type) like '%VARCHAR%'   then ' char(3000)'
                                 else '' end as suffix
                       from all_tab_columns a
                      where upper(a.owner||'.'||a.table_name) = upper(trim('{0}'))
                      order by a.column_id) '''
    if '.' in g_tablename:
        sql=sql.format(g_tablename)
    else:
        sql=sql.format(database+'.'+g_tablename)
    oracle_table_fields=connect_database_to_select(rdb_conn,sql)
    if not oracle_table_fields:
        print('No table structure found, table: {}'.format(g_tablename))
        sys.exit(2)
    oracle_table_field = ",\n".join([str(row[0]) for row in oracle_table_fields])
    return oracle_table_field


# run a single load task for one file
def exchange_data(file_path):
    try:
        output=''
        # run the load
        if ds_type.lower() in ('pg','postgresql','telpg','antdb'):
            cmd='''psql "port={0} host={1} user={2} dbname={3} password={4} " -c "\copy {5} from '{6}' delimiter as e'\\u0001' " '''
            cmd=cmd.format(port,host,ds_acct,database,ds_auth,g_tablename,file_path)
            status,output=commands.getstatusoutput(cmd)
            if status!=0:
                exception_list.append('Command {} failed, please check! Failure log: {}'.format(cmd,output))
        elif ds_type.lower() in ('mysql','teldb'):
            mysql_conn = pymysql.connect(host=host  , user=ds_acct, passwd=ds_auth  , port=port, database=database, charset='utf8', local_infile=True)
            mysql_cursor=mysql_conn.cursor()
            sql='set names utf8'
            mysql_cursor.execute(sql)
            sql='''load data local infile '{}' into table {} fields terminated by x'01' lines terminated by '\\n'  '''.format(file_path,g_tablename)
            #print(sql)
            output=mysql_cursor.execute(sql)
            mysql_conn.commit()
            mysql_conn.close()
            # cmd='''mysql -h {} -P {} -u {} -p{} -D {} -e "set names utf8;load data local infile '{}' into table {} fields terminated by x'01' lines terminated by '\\n'"  '''
            # cmd=cmd.format(host,port,ds_acct,ds_auth,database,file_path,g_tablename)
        elif ds_type.lower() == 'oracle':
            tns='''\'{}/"{}"\'@{}:1521/{}'''.format(ds_acct,ds_auth,host,database)   # note: port hard-coded to 1521
            ora_file_name=file_path.replace(local_path+'/','')
            ora_file_path=ora_dir+'/'+local_path_name+'_'+ora_file_name
            control_file = ora_file_path+".ctl"
            log_file     = ora_file_path+".log"
            bad_file     = ora_file_path+".bad"
            dis_file     = ora_file_path+".dis"
            content ='''
unrecoverable load data characterset al32utf8 
append into table {0} fields terminated by x'01' 
trailing nullcols ({1}) '''.format(g_tablename, oracle_table_field)
            # remove a leftover control file if it exists
            if os.path.exists(control_file):
                cmd='rm -rf {}'.format(control_file)
                exe_system_cmd(cmd)
            # then create the control file
            with open(control_file, "w") as ctl:
                ctl.write(content)
            cmd='''export ORACLE_HOME=/data03/apps/db_1;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH;cat {0} | /data03/apps/db_1/bin/sqlldr userid={1} control={2} data=\\"-\\" log={3} bad={4} discard={5} errors=0 direct=true parallel=true multithreading=true columnarrayrows=100000 streamsize=20971520 readsize=20971520 bindsize=20971520 date_cache=0 '''
            cmd=cmd.format(file_path, tns, control_file, log_file, bad_file, dis_file)
            status,output=commands.getstatusoutput(cmd)
            if status!=0:
                exception_list.append('Command {} failed, please check! Failure log: {}'.format(cmd,output))
        else:
            exception_list.append('Target database type {} is not supported yet!'.format(ds_type.lower()))

        # count the loaded rows
        if ds_type.lower() in ('pg','postgresql','telpg','antdb'):
            file_row_num=int(output.split('COPY ')[1].strip())
            exp_num_list.append(file_row_num)
        elif ds_type.lower() == 'oracle':
            try:
                output=output.decode('gbk')
            except:
                output=output
            # parse the row count out of sqlldr's Chinese-locale log ("逻辑记录计数" = logical record count)
            file_row_num=int(output.split('逻辑记录计数 ')[1].replace('。','').strip())
            exp_num_list.append(file_row_num)
        elif ds_type.lower() in ('mysql','teldb'):
            exp_num_list.append(output)
        # store the log
        log_list.append(output)
    except Exception as e:
        exception_list.append(e)




def delete_local_path(local_path,hdfs_path):
    cmd='rm -rf {}'.format(local_path)
    exe_system_cmd(cmd)
    print('Local folder deleted.')
    cmd='hdfs dfs -rm -r {}'.format(hdfs_path)
    exe_system_cmd(cmd)
    print('HDFS folder deleted.')


if __name__ == '__main__':
    starttime = datetime.datetime.now()
    print('Start time: {0}'.format(starttime.strftime('%Y-%m-%d %H:%M:%S')))
    # 1. validate the input parameters
    sql,s_tablename,ds_name,g_tablename,flag=judge_input_parameters_num()
    # 2. run the SQL, produce the files and return the local directory
    local_path,hdfs_path=produce_exe_data(sql,s_tablename)
    hdfs_time=datetime.datetime.now()
    #print('Current time: {}'.format(hdfs_time))
    print("Generating the HDFS files took {0} seconds".format((hdfs_time - starttime).seconds))
    # 3. get the target connection info (host, port, ...)
    get_rdb_conn_msg(ds_name)
    # 4. run the load tasks
    thread_exe_exchange_data(g_tablename,flag,local_path,hdfs_path)
    # 5. delete the local folder
    delete_local_path(local_path,hdfs_path)
    # 6. close the database connection
    close_datadb_conn()
    endtime = datetime.datetime.now()
    print('End time: {0}'.format(endtime.strftime('%Y-%m-%d %H:%M:%S')))
    print('Loading took {0} seconds'.format((endtime - hdfs_time).seconds))
    print("Total time: {0} seconds".format((endtime - starttime).seconds))

Summary

Together, the scripts implement a complete HDFS-to-relational-database migration: data is first exported from Hive into HDFS, then loaded into the target database either through DataX (the shell wrapper) or through the multi-threaded Python loader, which uses \copy, LOAD DATA or sqlldr depending on the target. Staging files on a randomly chosen local disk and loading them in parallel keeps the transfer efficient, and the summed per-file row counts provide a basic completeness check — a good fit for bulk loads and data warehouse migrations.

Be sure to adjust the paths, hosts and other parameters in the scripts to fit your own environment and data architecture.
