#!/bin/bash
# Daily incremental sqoop import: pulls new/changed rows from MySQL into a
# date-partitioned HDFS directory and registers the partition in a Hive
# external table. Re-running on the same day is idempotent (see below).
cur_date=$(date +%Y-%m-%d)   # today's date = name of the partition to load
table=pay                    # source MySQL table name
datebase=ods                 # Hive database name (sic: variable name kept as-is for compatibility)
hive_table=pay               # target Hive table name
dir=bigdata/public/ods       # HDFS base directory (leading '/' added at use sites)
# Find the most recent partition date already present in HDFS; on the first
# run (table directory absent) fall back to 1900-01-01 so the sqoop query
# below pulls the full history.
if hdfs dfs -test -e "/$dir/$table"
then
  # Take the max date from the listing's path column. The original piped
  # 'grep -o | uniq -d', which only worked when a file's modification date
  # happened to be adjacent and identical to the date in its path — fragile.
  yestday=$(hdfs dfs -ls "/$dir/$table" \
    | awk '{print $NF}' \
    | grep -o -E "[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}" \
    | sort | tail -n 1)
else
  yestday=1900-01-01
fi
# If today's partition already exists (script re-run on the same day), drop it
# from both HDFS and Hive so the import below can recreate it cleanly, then
# recompute the latest remaining partition date. Quoted comparison avoids
# 'unary operator expected' errors when either variable is empty.
if [ "$yestday" = "$cur_date" ]
then
  hdfs dfs -rm -r "/$dir/$table/$cur_date"
  hive -e "alter table $datebase.$hive_table drop partition (pdate='$cur_date');"
  # Same max-date extraction as the initial detection above.
  yestday=$(hdfs dfs -ls "/$dir/$table" \
    | awk '{print $NF}' \
    | grep -o -E "[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}" \
    | sort | tail -n 1)
fi
# Guard against an empty date (e.g. the table directory existed but held no
# date-named entries). The original "test $yestday==''" was a single-argument
# string test that is ALWAYS true — for a non-empty $yestday it collapses to
# one non-empty word like "2024-01-01==" — which silently reset yestday to
# 1900-01-01 and forced a full import on every run.
if [ -z "$yestday" ]
then
  yestday=1900-01-01
fi
# Import rows created or modified in [yestday, cur_date) into today's HDFS
# partition directory, capturing sqoop's combined output for error reporting.
# NOTE(review): the JDBC URL uses the literal database name 'datebase' — this
# looks like it was meant to be a variable; confirm against the deployment.
# NOTE(review): the password is passed on the command line (visible via 'ps');
# prefer --password-file.
output=$( \
sqoop import \
--connect jdbc:mysql://192.168.241.1/datebase \
--username Upp \
--password Upp \
--target-dir /${dir}/${table}/${cur_date} \
--query "select * from ${table} where \$CONDITIONS and ((CAST(create_date as DATE)<'${cur_date}' and CAST(create_date as DATE)>='${yestday}') or (CAST(last_edit_date as DATE)<'${cur_date}' and CAST(last_edit_date as DATE)>='${yestday}'))" \
--fields-terminated-by '\001' \
--split-by id \
--m 1 \
2>&1)
# The extra parentheses around the OR fix a precedence bug: SQL AND binds
# tighter than OR, so the original "\$CONDITIONS and (A) or (B)" let B-matched
# rows bypass the \$CONDITIONS split predicate (duplicate rows if mappers > 1).
# $? here is sqoop's exit status — it survives the command substitution assignment.
if [ $? -ne 0 ];then
echo "wrong"
echo "---------------------------------------------------------------------------------------------"
# Quote $output so the captured log keeps its line breaks.
echo "$output"
fi
# Register the imported data in Hive: create the external table on first run
# (location = the table's base directory) and attach today's date directory as
# a new partition.
# NOTE(review): the table is declared with a single column (fmid string) while
# the sqoop query does "select *"; if the source table has more columns, Hive
# will only surface the first field of each row — confirm the schema matches.
hive -e "
use $datebase;
create external table if not exists $hive_table (
fmid string
) partitioned by (pdate string) location 'hdfs://ns1/${dir}/${table}';
alter table $hive_table add partition (pdate='${cur_date}') location 'hdfs://ns1/${dir}/${table}/${cur_date}';"
# 最近改進的sqoop腳本
# 功能介紹:
# 按天從數據庫根據兩個字段拉取增量,如果是第一次跑會跑全量,在表名的目錄下生成日期文件夾並且生成外部分區表.
# 每次運行會查詢到該文件夾下最新的日期,如果和今天日期一樣,將會刪除並且新增今天新數據.
# 這樣子重複執行增量腳本不會報錯,不需要手動刪除hdfs文件還有刪除掉hive表當天的分區.