一、数据拆分详解
1. 按条件拆分数据
1.1 单条件拆分
import pandas as pd

# Build a small sample employee table for the splitting demos below.
data = {
    'name': ['alice', 'bob', 'charlie', 'david', 'eva', 'frank', 'grace'],
    'age': [25, 30, 35, 40, 45, 28, 33],
    'department': ['hr', 'it', 'hr', 'finance', 'it', 'marketing', 'hr'],
    'salary': [5000, 7000, 5500, 9000, 7500, 6000, 5800]
}
# BUG FIX: the constructor is pd.DataFrame (CamelCase), not pd.dataframe.
df = pd.DataFrame(data)
# Single-condition split: select the rows belonging to the hr department.
hr_employees = df[df['department'] == 'hr']
print("hr部门员工:")
print(hr_employees)
# Equivalent query() form of the same filter.
hr_employees = df.query('department == "hr"')
1.2 多条件组合拆分
# AND condition: age above 30 and salary below 6000.
condition = df['age'].gt(30) & df['salary'].lt(6000)
filtered_df = df[condition]
print("\n年龄>30且薪资<6000的员工:")
print(filtered_df)
# OR condition: rows from either the hr or the it department.
condition = df['department'].eq('hr') | df['department'].eq('it')
dept_filtered = df[condition]
print("\nhr或it部门的员工:")
print(dept_filtered)
# NOT condition: everyone outside the hr department.
non_hr = df[~df['department'].isin(['hr'])]
print("\n非hr部门的员工:")
print(non_hr)
1.3 使用isin()进行多值筛选
# Keep only the rows whose department appears in the target list.
target_departments = ['hr', 'finance']
dept_filter = df['department'].isin(target_departments)
filtered_df = df.loc[dept_filter]
print("\nhr和finance部门的员工:")
print(filtered_df)
2. 按比例拆分数据
2.1 简单随机拆分
from sklearn.model_selection import train_test_split

# Random split: 70% training / 30% test, seeded for reproducibility.
train_df, test_df = train_test_split(df, random_state=42, test_size=0.3)
print(f"\n训练集 ({len(train_df)}条):")
print(train_df)
print(f"\n测试集 ({len(test_df)}条):")
print(test_df)
2.2 分层抽样拆分
# 按部门分层抽样,保持各部门比例
# Stratified split: sample within each department so the train/test sets
# preserve the department proportions of the full dataset.
# NOTE(review): sklearn requires every stratum to contain at least 2 samples;
# in this 7-row demo, finance and marketing each have only 1 employee, so this
# call raises ValueError — run it on a larger dataset to see the effect.
stratified_split = train_test_split(
    df,
    test_size=0.3,
    random_state=42,
    stratify=df['department']
)
train_strat, test_strat = stratified_split
print("\n分层抽样后的部门分布:")
print("训练集部门分布:")
# BUG FIX: Python's boolean literal is True, not lowercase true.
print(train_strat['department'].value_counts(normalize=True))
print("\n测试集部门分布:")
print(test_strat['department'].value_counts(normalize=True))
2.3 时间序列拆分
# 添加日期列
# Attach a join-date column to the sample data.
df['join_date'] = pd.to_datetime([
    '2020-01-15', '2019-05-20', '2021-03-10',
    '2018-11-05', '2022-02-28', '2020-07-15', '2019-09-01',
])
# Split the data at a fixed cutoff date.
cutoff_date = pd.to_datetime('2021-01-01')
before_cutoff = df['join_date'] < cutoff_date
historical = df[before_cutoff]
recent = df[~before_cutoff]
print(f"\n历史数据(2021年前加入, {len(historical)}条):")
print(historical)
print(f"\n近期数据(2021年后加入, {len(recent)}条):")
print(recent)
3. 按组拆分数据
3.1 使用groupby拆分
# Partition the rows by department.
department_groups = df.groupby('department')
# Iterate the groups and print each sub-frame.
print("\n按部门分组结果:")
for dept_name, members in department_groups:
    print(f"\n{dept_name}部门:")
    print(members)
# Fetch a single group directly by its key.
hr_group = department_groups.get_group('hr')
print("\nhr部门数据:")
print(hr_group)
3.2 拆分为多个dataframe
# Store each department's rows in its own DataFrame, keyed by department name.
department_dfs = dict(iter(department_groups))
# Look up a single department's data by key.
print("\nit部门数据:")
print(department_dfs['it'])
# Alternatively, collect the group frames into a plain list.
department_list = [frame for _, frame in department_groups]
二、数据合并详解
1. concat方法
1.1 垂直合并(行方向)
# Two DataFrames with identical columns, for row-wise concatenation.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
df1 = pd.DataFrame({
    'name': ['alice', 'bob'],
    'age': [25, 30],
    'department': ['hr', 'it']
})
df2 = pd.DataFrame({
    'name': ['charlie', 'david'],
    'age': [35, 40],
    'department': ['finance', 'it']
})
# Vertical (row-wise) concatenation; the original indexes are kept, so
# labels 0 and 1 appear twice.
combined = pd.concat([df1, df2], axis=0)
print("\n垂直合并结果:")
print(combined)
# BUG FIX: the boolean literal is True, not true.
# ignore_index=True renumbers the result 0..n-1.
combined_reset = pd.concat([df1, df2], axis=0, ignore_index=True)
print("\n重置索引后的合并结果:")
print(combined_reset)
1.2 水平合并(列方向)
# Two DataFrames with different columns, for column-wise concatenation.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
info_df = pd.DataFrame({
    'name': ['alice', 'bob', 'charlie', 'david'],
    'employee_id': [101, 102, 103, 104]
})
salary_df = pd.DataFrame({
    'name': ['alice', 'bob', 'charlie', 'david'],
    'salary': [5000, 7000, 5500, 9000],
    'bonus': [500, 700, 550, 900]
})
# Horizontal (column-wise) concatenation; drop the duplicated 'name' column
# from the right side first so it does not appear twice.
combined_cols = pd.concat([info_df, salary_df.drop('name', axis=1)], axis=1)
print("\n水平合并结果:")
print(combined_cols)
1.3 处理不同索引
# Re-index both frames by the name column.
df1_indexed = df1.set_index('name')
df2_indexed = df2.set_index('name')
# Row-wise concat keeps every index label from both inputs (axis=0 is the default).
combined_index = pd.concat((df1_indexed, df2_indexed))
print("\n保留所有索引的合并:")
print(combined_index)
2. merge方法
2.1 基本合并操作
# Employee table.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
employees = pd.DataFrame({
    'employee_id': [101, 102, 103, 104, 105],
    'name': ['alice', 'bob', 'charlie', 'david', 'eva'],
    'dept_id': [1, 2, 1, 3, 2]
})
# Department table (dept_id 4 has no employees on purpose).
departments = pd.DataFrame({
    'dept_id': [1, 2, 3, 4],
    'dept_name': ['hr', 'it', 'finance', 'marketing'],
    'location': ['floor1', 'floor2', 'floor3', 'floor4']
})
# Inner join (the default): keep only dept_ids present in both tables.
inner_merge = pd.merge(employees, departments, on='dept_id')
print("\n内连接结果:")
print(inner_merge)
# Left join: keep every employee row, NaN-fill unmatched department columns.
left_merge = pd.merge(employees, departments, on='dept_id', how='left')
print("\n左连接结果:")
print(left_merge)
# Right join: keep every department row, including empty dept 4.
right_merge = pd.merge(employees, departments, on='dept_id', how='right')
print("\n右连接结果:")
print(right_merge)
# Full outer join: union of keys from both sides.
outer_merge = pd.merge(employees, departments, on='dept_id', how='outer')
print("\n全外连接结果:")
print(outer_merge)
2.2 多键合并
# Add a location column to the employee table.
employees['location'] = ['floor1', 'floor2', 'floor1', 'floor3', 'floor2']
# Merge on department id AND location together; since both sides use the
# same column names, a single `on` list is equivalent to left_on/right_on.
multi_key_merge = pd.merge(
    employees,
    departments,
    on=['dept_id', 'location'],
    how='left'
)
print("\n多键合并结果:")
print(multi_key_merge)
2.3 处理重复列名
# Now both tables carry a 'name' column (employee name vs. manager name).
departments['manager'] = ['alice', 'bob', 'charlie', 'david']
# Disambiguate the clashing non-key columns with explicit suffixes.
merge_with_suffix = pd.merge(
    employees,
    departments,
    on='dept_id',
    suffixes=('_employee', '_manager')
)
print("\n处理重复列名的合并:")
print(merge_with_suffix)
3. join方法
3.1 基于索引的合并
# Index the employee table by employee_id so join() can align on it.
employees_indexed = employees.set_index('employee_id')
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
salary_info = pd.DataFrame({
    'employee_id': [101, 102, 103, 104, 105],
    'salary': [5000, 7000, 5500, 9000, 7500],
    'bonus': [500, 700, 550, 900, 750]
}).set_index('employee_id')
# DataFrame.join aligns on the index by default (left join).
joined_df = employees_indexed.join(salary_info)
print("\n基于索引的join合并:")
print(joined_df)
3.2 不同join类型
# Drop two rows to simulate missing salary records.
partial_salary = salary_info.drop([104, 105])
# Inner join keeps only the ids present on both sides.
inner_join = employees_indexed.join(partial_salary, how='inner')
print("\n内连接join结果:")
print(inner_join)
# Left join keeps every employee, filling the gaps with NaN.
left_join = employees_indexed.join(partial_salary, how='left')
print("\n左连接join结果:")
print(left_join)
三、高级合并技巧
1. 合并时的冲突处理
# Two tables whose 'value' columns disagree on the overlapping ids.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
df_conflict1 = pd.DataFrame({
    'id': [1, 2, 3],
    'value': ['a', 'b', 'c']
})
df_conflict2 = pd.DataFrame({
    'id': [2, 3, 4],
    'value': ['x', 'y', 'z']
})
# Outer-merge with suffixes so both versions of 'value' survive.
merged_conflict = pd.merge(
    df_conflict1,
    df_conflict2,
    on='id',
    how='outer',
    suffixes=('_left', '_right')
)
# Resolve the conflict: prefer the right value, fall back to the left one.
merged_conflict['value'] = merged_conflict['value_right'].fillna(merged_conflict['value_left'])
merged_conflict = merged_conflict.drop(['value_left', 'value_right'], axis=1)
print("\n冲突处理后的合并结果:")
print(merged_conflict)
2. 合并时的复杂条件
# Order and customer tables for a merge followed by a row-level condition.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
orders = pd.DataFrame({
    'order_id': [1, 2, 3, 4, 5],
    'customer_id': [101, 102, 101, 103, 104],
    'order_date': pd.to_datetime(['2023-01-01', '2023-01-02', '2023-01-03', '2023-01-04', '2023-01-05']),
    'amount': [100, 200, 150, 300, 250]
})
# Customer 104 is deliberately absent; customer 105 has no orders.
customers = pd.DataFrame({
    'customer_id': [101, 102, 103, 105],
    'join_date': pd.to_datetime(['2022-01-01', '2022-05-15', '2022-11-20', '2023-01-01']),
    'tier': ['gold', 'silver', 'silver', 'bronze']
})
# Merge first, then filter: keep only orders placed after the customer joined.
merged_complex = pd.merge(
    orders,
    customers,
    on='customer_id',
    how='left'
)
# Rows with no matching customer have join_date == NaT; any comparison with
# NaT is False, so those rows are dropped here as well.
merged_complex = merged_complex[merged_complex['order_date'] >= merged_complex['join_date']]
print("\n复杂条件合并结果:")
print(merged_complex)
3. 大型数据集的合并优化
import numpy as np
import timeit

# Two large frames whose id ranges partially overlap (50000..100000 shared).
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
large_df1 = pd.DataFrame({
    'id': range(1, 100001),
    'value1': np.random.rand(100000)
})
# BUG FIX: range(50000, 150001) has 100001 elements but only 100000 values
# were supplied, which raises ValueError; use a half-open range of 100000 ids.
large_df2 = pd.DataFrame({
    'id': range(50000, 150000),
    'value2': np.random.rand(100000)
})
# Optimization 1: shrink the join-key dtype to cut memory and hashing cost.
large_df1['id'] = large_df1['id'].astype('int32')
large_df2['id'] = large_df2['id'].astype('int32')
# Optimization 2: measure the plain merge.
# BUG FIX: %timeit is IPython-only magic; plain Python uses the timeit module.
t_full = timeit.timeit(lambda: pd.merge(large_df1, large_df2, on='id'), number=3)
print(f"full merge: {t_full / 3:.4f}s per run")
# Optimization 3: pre-filter the right table to the relevant keys, then merge.
filtered_df2 = large_df2[large_df2['id'] <= 100000]
t_filtered = timeit.timeit(lambda: pd.merge(large_df1, filtered_df2, on='id'), number=3)
print(f"filtered merge: {t_filtered / 3:.4f}s per run")
四、实际应用案例
1. 电商数据分析
# E-commerce sample tables: orders, customers, and order line items.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
orders = pd.DataFrame({
    'order_id': [1001, 1002, 1003, 1004, 1005],
    'customer_id': [201, 202, 203, 204, 205],
    'order_date': pd.to_datetime(['2023-01-01', '2023-01-02', '2023-01-02', '2023-01-03', '2023-01-04']),
    'amount': [150.0, 200.0, 75.5, 300.0, 125.0]
})
# Customer 205 is deliberately missing (206 never ordered).
customers = pd.DataFrame({
    'customer_id': [201, 202, 203, 204, 206],
    'name': ['alice', 'bob', 'charlie', 'david', 'eva'],
    'join_date': pd.to_datetime(['2022-01-15', '2022-03-20', '2022-05-10', '2022-07-05', '2022-09-01']),
    'tier': ['gold', 'silver', 'silver', 'bronze', 'gold']
})
products = pd.DataFrame({
    'order_id': [1001, 1001, 1002, 1003, 1004, 1004, 1005],
    'product_id': [1, 2, 1, 3, 2, 3, 1],
    'quantity': [1, 2, 1, 1, 3, 1, 2],
    'price': [50.0, 50.0, 200.0, 75.5, 100.0, 100.0, 62.5]
})
# Join orders with customer info (customer 205 gets NaN customer columns).
order_customer = pd.merge(orders, customers, on='customer_id', how='left')
# Join in the line items; multi-line orders expand into several rows.
full_data = pd.merge(order_customer, products, on='order_id', how='left')
# Per-line revenue.
full_data['extended_price'] = full_data['quantity'] * full_data['price']
# BUG FIX: after the line-item join, each order's `amount` repeats once per
# line, so summing it directly double-counts multi-line orders. Deduplicate
# to one row per order before summing order amounts.
order_amounts = (
    full_data.drop_duplicates('order_id')
    .groupby(['customer_id', 'name', 'tier'])['amount']
    .sum()
    .rename('total_amount')
)
# Per-customer rollup; groupby drops the NaN-named customer 205 by default.
customer_analysis = (
    full_data.groupby(['customer_id', 'name', 'tier'])
    .agg(
        total_orders=('order_id', 'nunique'),
        total_items=('quantity', 'sum')
    )
    .join(order_amounts)
    .reset_index()
    [['customer_id', 'name', 'tier', 'total_orders', 'total_amount', 'total_items']]
)
print("\n完整的电商合并数据:")
print(full_data)
print("\n客户分析:")
print(customer_analysis)
2. 学生成绩分析
# Student roster plus two per-subject grade tables with partial overlap.
# BUG FIX: the constructor is pd.DataFrame, not pd.dataframe.
students = pd.DataFrame({
    'student_id': [1, 2, 3, 4, 5],
    'name': ['alice', 'bob', 'charlie', 'david', 'eva'],
    'class': ['a', 'b', 'a', 'b', 'a']
})
# Student 6 exists only in the math table; student 5 has no math grade.
grades_math = pd.DataFrame({
    'student_id': [1, 2, 3, 4, 6],
    'math_score': [90, 85, 78, 92, 88],
    'math_rank': [1, 2, 3, 1, 2]
})
# Student 7 exists only in the english table; student 2 has no english grade.
grades_english = pd.DataFrame({
    'student_id': [1, 3, 4, 5, 7],
    'english_score': [88, 76, 95, 82, 90],
    'english_rank': [2, 3, 1, 4, 1]
})
# Two chained left joins keep every rostered student, with NaN for
# missing subject grades.
all_grades = pd.merge(
    pd.merge(students, grades_math, on='student_id', how='left'),
    grades_english,
    on='student_id',
    how='left'
)
# Row-wise mean skips NaN, so a single available score stands alone.
all_grades['average_score'] = all_grades[['math_score', 'english_score']].mean(axis=1)
all_grades['average_rank'] = all_grades[['math_rank', 'english_rank']].mean(axis=1)
# Per-class summary statistics.
class_analysis = all_grades.groupby('class').agg(
    avg_math=('math_score', 'mean'),
    avg_english=('english_score', 'mean'),
    top_math=('math_score', 'max'),
    top_english=('english_score', 'max')
).reset_index()
print("\n完整的学生成绩数据:")
print(all_grades)
print("\n班级分析:")
print(class_analysis)
五、最佳实践和常见问题
1. 合并前的准备工作
# Pre-merge sanity checks on the key columns of both tables.
# 1. Check key uniqueness on each side of the merge.
print("\n客户id在customers表中的唯一性:", customers['customer_id'].is_unique)
print("订单id在orders表中的唯一性:", orders['order_id'].is_unique)
# 2. Check for missing values in the merge keys.
print("\ncustomers表中customer_id的缺失值:", customers['customer_id'].isnull().sum())
print("orders表中customer_id的缺失值:", orders['customer_id'].isnull().sum())
# 3. Check that the key dtypes match (mismatched dtypes slow or break merges).
print("\ncustomers表中customer_id的类型:", customers['customer_id'].dtype)
print("orders表中customer_id的类型:", orders['customer_id'].dtype)
# 4. Preprocess: fill missing keys and normalize both sides to int.
orders['customer_id'] = orders['customer_id'].fillna(0).astype(int)
customers['customer_id'] = customers['customer_id'].astype(int)
2. 合并后的验证
# Merge, then validate the result.
merged_data = pd.merge(orders, customers, on='customer_id', how='left')
# 1. Compare the merged row count against both inputs (a left join should
# not exceed the left table's row count when the right key is unique).
print("\n合并后的行数:", len(merged_data))
print("左表行数:", len(orders))
print("右表行数:", len(customers))
# 2. Count matched vs. unmatched rows — a NaN 'name' means no customer matched.
print("\n成功匹配的记录数:", len(merged_data[~merged_data['name'].isnull()]))
print("未匹配的记录数:", len(merged_data[merged_data['name'].isnull()]))
# 3. Inspect the merged column names for unexpected duplicates or suffixes.
print("\n合并后的列名:", merged_data.columns.tolist())
# 4. Spot-check a random sample (seeded so the check is reproducible).
print("\n合并数据抽样检查:")
print(merged_data.sample(3, random_state=42))
3. 性能优化技巧
# 1. Shrink the merge-key dtype to speed up hashing and cut memory.
orders['customer_id'] = orders['customer_id'].astype('int32')
customers['customer_id'] = customers['customer_id'].astype('int32')
# 2. Reduce the data volume before merging.
# Keep only the columns the downstream analysis actually needs.
customers_filtered = customers[['customer_id', 'name', 'tier']]
# 3. Use a more efficient merge strategy.
# For datasets that do not fit in memory, consider dask or pyspark.
# 4. Chunked merging (helper defined below).
def chunk_merge(left, right, on, chunksize=10000, how='left'):
    """Merge `left` with `right` in row chunks to cap peak memory.

    Slices `left` into blocks of `chunksize` rows, merges each block with
    `right`, and concatenates the partial results into one DataFrame.
    """
    pieces = []
    start = 0
    while start < len(left):
        piece = left.iloc[start:start + chunksize].merge(right, on=on, how=how)
        pieces.append(piece)
        start += chunksize
    return pd.concat(pieces, axis=0)
# 5. Index both sides so join() aligns on the index instead of hashing columns.
import timeit

orders_indexed = orders.set_index('customer_id')
customers_indexed = customers.set_index('customer_id')
# BUG FIX: %timeit is IPython-only magic; plain Python uses the timeit module.
t_join = timeit.timeit(lambda: orders_indexed.join(customers_indexed, how='left'), number=10)
print(f"indexed join: {t_join / 10:.6f}s per run")
4. 常见问题及解决方案
问题1: 合并后行数异常增多
- 原因: 合并键在其中一个表中不唯一
- 解决: 检查合并键列的唯一性(注意 df.duplicated() 统计的是整行重复,不能反映某一键列是否唯一)
df['merge_key'].duplicated().sum()  # 应为 0
问题2: 合并后出现大量nan值
- 原因: 键不匹配或使用了外连接
- 解决: 检查键的匹配情况或使用内连接
问题3: 合并速度非常慢
- 原因: 数据集太大或键的数据类型不一致
- 解决: 优化数据类型,分块处理,或使用更高效的工具
问题4: 列名冲突
- 原因: 两个表有相同列名但非合并键
- 解决: 使用suffixes参数或提前重命名列
问题5: 内存不足
- 原因: 数据集太大
- 解决: 使用分块处理,或者考虑使用dask等工具
以上就是python进行数据拆分和合并的超详细指南的详细内容,更多关于python数据拆分和合并的资料请关注代码网其它相关文章!
发表评论