How to Implement Optimal Binning of Data in Python


Monotonic Binning with Python

Monotonic binning is a data preparation technique widely used in scorecard development, and it is usually implemented in SAS. Below is an attempt to do monotonic binning in Python.

Python Code:

# import packages
import pandas as pd
import numpy as np
import scipy.stats as stats

# import data
data = pd.read_csv("/home/liuwensui/Documents/data/accepts.csv", sep=",", header=0)

# define a binning function
def mono_bin(Y, X, n=20):
    # fill missings with the median (pandas .median() skips NaN)
    X2 = X.fillna(X.median())
    r = 0
    # decrease the number of quantile buckets until the bucket means of X
    # and Y are perfectly monotonic (|Spearman correlation| = 1)
    while np.abs(r) < 1:
        d1 = pd.DataFrame({"X": X2, "Y": Y, "Bucket": pd.qcut(X2, n)})
        d2 = d1.groupby('Bucket', as_index=True)
        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
        n = n - 1
    # summarize each bucket: value range, bad count, total count, bad rate
    d3 = pd.DataFrame({'min_' + X.name: d2.min().X})
    d3['max_' + X.name] = d2.max().X
    d3[Y.name] = d2.sum().Y
    d3['total'] = d2.count().Y
    d3[Y.name + '_rate'] = d2.mean().Y
    d4 = d3.sort_values(by='min_' + X.name).reset_index(drop=True)
    print("=" * 60)
    print(d4)

mono_bin(data.bad, data.ltv)
mono_bin(data.bad, data.bureau_score)
mono_bin(data.bad, data.age_oldest_tr)
mono_bin(data.bad, data.tot_tr)
mono_bin(data.bad, data.tot_income)

mono_bin(data.bad, data.ltv)

mono_bin(data.bad, data.bureau_score)

mono_bin(data.bad, data.age_oldest_tr)

mono_bin(data.bad, data.tot_tr)

mono_bin(data.bad, data.tot_income)

Output:

============================================================
   min_ltv  max_ltv  bad  total  bad_rate
0        0       83   88    884  0.099548
1       84       92  137    905  0.151381
2       93       98  175    851  0.205640
3       99      102  173    814  0.212531
4      103      108  194    821  0.236297
5      109      116  194    769  0.252276
6      117      176  235    793  0.296343
============================================================
   min_bureau_score  max_bureau_score  bad  total  bad_rate
0               443               630  325    747  0.435074
1               631               655  242    721  0.335645
2               656               676  173    721  0.239945
3               677               698  245   1059  0.231350
4               699               709   64    427  0.149883
5               710               732   73    712  0.102528
6               733               763   53    731  0.072503
7               764               848   21    719  0.029207
============================================================
   min_age_oldest_tr  max_age_oldest_tr  bad  total  bad_rate
0                  1                 59  319    987  0.323202
1                 60                108  235    975  0.241026
2                109                142  282   1199  0.235196
3                143                171  142    730  0.194521
4                172                250  125    976  0.128074
5                251                588   93    970  0.095876
============================================================
   min_tot_tr  max_tot_tr  bad  total  bad_rate
0           0           8  378   1351  0.279793
1           9          13  247   1025  0.240976
2          14          18  240   1185  0.202532
3          19          25  165   1126  0.146536
4          26          77  166   1150  0.144348
============================================================
   min_tot_income  max_tot_income  bad  total  bad_rate
0            0.00         2000.00  323   1217  0.265407
1         2002.00         2916.67  259   1153  0.224631
2         2919.00         4000.00  226   1150  0.196522
3         4001.00         5833.33  231   1186  0.194772
4         5833.34      8147166.66  157   1131  0.138815
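Because these buckets typically feed a scorecard, a natural next step is to attach a weight-of-evidence (WOE) value to each bucket. Below is a minimal sketch, assuming the bucket summary is available as a DataFrame shaped like d4 above (in mono_bin it is only printed, so returning d4 instead of printing it is an assumed change); the function name add_woe is illustrative:

import numpy as np

def add_woe(d4, bad_col='bad', total_col='total'):
    # goods are the non-bad accounts in each bucket
    good = d4[total_col] - d4[bad_col]
    bad = d4[bad_col]
    # WOE = ln(share of goods / share of bads) per bucket;
    # positive values indicate lower-risk buckets
    d4 = d4.copy()
    d4['woe'] = np.log((good / good.sum()) / (bad / bad.sum()))
    return d4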


Another common approach is chi-square (ChiMerge) binning. The functions below first compute a chi-square statistic for each distinct feature value and then merge adjacent intervals.

def calc_chiSquare(sampleSet, feature, target):
    '''
    Compute the chi-square statistic for each distinct value of a feature.
    params:
        sampleSet: sample DataFrame
        feature: feature column to analyze
        target: binary target column (0 or 1)
    return:
        DataFrame of chi-square statistics with columns:
        feature: the feature value
        act_target_cnt: actual bad count
        expected_target_cnt: expected bad count
        chi_square: chi-square statistic
    '''
    # compute the expected bad rate over the whole sample
    target_cnt = sampleSet[target].sum()
    sample_cnt = len(sampleSet[target])
    expected_ratio = target_cnt * 1.0 / sample_cnt
    # sort the distinct attribute values so neighboring rows correspond
    # to adjacent intervals
    df = sampleSet[[feature, target]]
    col_value = sorted(set(df[feature]))
    # compute the chi-square statistic and related counts for each value
    chi_list = []
    target_list = []
    expected_target_list = []
    for value in col_value:
        df_target_cnt = df.loc[df[feature] == value, target].sum()
        df_cnt = len(df.loc[df[feature] == value, target])
        expected_target_cnt = df_cnt * expected_ratio
        chi_square = (df_target_cnt - expected_target_cnt)**2 / expected_target_cnt
        chi_list.append(chi_square)
        target_list.append(df_target_cnt)
        expected_target_list.append(expected_target_cnt)
    # collect the results: feature value, chi-square, actual and expected bad counts
    chi_stats = pd.DataFrame({feature: col_value, 'chi_square': chi_list,
                              'act_target_cnt': target_list,
                              'expected_target_cnt': expected_target_list})
    return chi_stats[[feature, 'act_target_cnt', 'expected_target_cnt', 'chi_square']]
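A quick usage sketch on a toy sample (the data and column names here are made up for illustration):

# toy sample: 'score_band' is the feature, 'label' is the binary target
sample = pd.DataFrame({
    'score_band': [1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
    'label':      [1, 0, 1, 1, 0, 0, 0, 1, 0, 0],
})
chi_stats = calc_chiSquare(sample, 'score_band', 'label')
print(chi_stats)
# one row per distinct score_band value, with the actual and expected
# bad counts and the per-value chi-square statistic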

def chiMerge_maxInterval(chi_stats, feature, maxInterval=5):
    '''
    Chi-square bin merging -- maximum-interval method.
    params:
        chi_stats: DataFrame of chi-square statistics
        feature: feature column to merge
        maxInterval: maximum number of bins
    return:
        merged chi-square DataFrame and the list of split points
    '''
    group_cnt = len(chi_stats)
    split_list = [chi_stats[feature].min()]
    # while there are more intervals than allowed, keep merging the
    # interval with the smallest chi-square into a neighbor
    while group_cnt > maxInterval:
        min_index = chi_stats[chi_stats['chi_square'] == chi_stats['chi_square'].min()].index.tolist()[0]
        # if the interval is first, merge downward
        if min_index == 0:
            chi_stats = merge_chiSquare(chi_stats, min_index + 1, min_index)
        # if the interval is last, merge upward
        elif min_index == group_cnt - 1:
            chi_stats = merge_chiSquare(chi_stats, min_index - 1, min_index)
        # otherwise merge with whichever neighbor has the smaller chi-square
        else:
            if chi_stats.loc[min_index - 1, 'chi_square'] > chi_stats.loc[min_index + 1, 'chi_square']:
                chi_stats = merge_chiSquare(chi_stats, min_index, min_index + 1)
            else:
                chi_stats = merge_chiSquare(chi_stats, min_index - 1, min_index)
        group_cnt = len(chi_stats)
    chiMerge_result = chi_stats
    split_list.extend(chiMerge_result[feature].tolist())
    return chiMerge_result, split_list
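Continuing the toy example (and assuming merge_chiSquare, defined further below, is already loaded), the merge loop reduces the table to at most maxInterval rows and returns candidate cutpoints:

merged, split_list = chiMerge_maxInterval(chi_stats, 'score_band', maxInterval=2)
print(merged)      # at most 2 surviving intervals
print(split_list)  # the feature minimum plus the left edge of each interval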

def chiMerge_minChiSquare(chi_stats, feature, dfree=4, cf=0.1, maxInterval=5):
    '''
    Chi-square bin merging -- chi-square threshold method.
    params:
        chi_stats: DataFrame of chi-square statistics
        feature: feature column to merge
        maxInterval: maximum number of bins, default 5
        dfree: degrees of freedom, maximum number of bins - 1, default 4
        cf: significance level, default 10%
    return:
        merged chi-square DataFrame and the list of split points
    '''
    threshold = get_chiSquare_distribution(dfree, cf)
    min_chiSquare = chi_stats['chi_square'].min()
    group_cnt = len(chi_stats)
    split_list = [chi_stats[feature].min()]
    # keep merging while the smallest chi-square is below the threshold
    # and the number of intervals still exceeds maxInterval
    while min_chiSquare < threshold and group_cnt > maxInterval:
        min_index = chi_stats[chi_stats['chi_square'] == chi_stats['chi_square'].min()].index.tolist()[0]
        # if the interval is first, merge downward
        if min_index == 0:
            chi_stats = merge_chiSquare(chi_stats, min_index + 1, min_index)
        # if the interval is last, merge upward
        elif min_index == group_cnt - 1:
            chi_stats = merge_chiSquare(chi_stats, min_index - 1, min_index)
        # otherwise merge with whichever neighbor has the smaller chi-square
        else:
            if chi_stats.loc[min_index - 1, 'chi_square'] > chi_stats.loc[min_index + 1, 'chi_square']:
                chi_stats = merge_chiSquare(chi_stats, min_index, min_index + 1)
            else:
                chi_stats = merge_chiSquare(chi_stats, min_index - 1, min_index)
        min_chiSquare = chi_stats['chi_square'].min()
        group_cnt = len(chi_stats)
    chiMerge_result = chi_stats
    split_list.extend(chiMerge_result[feature].tolist())
    return chiMerge_result, split_list
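The threshold-driven variant is called the same way; here the loop stops as soon as every remaining interval's chi-square reaches the critical value, or once only maxInterval intervals are left:

merged, split_list = chiMerge_minChiSquare(chi_stats, 'score_band',
                                           dfree=4, cf=0.1, maxInterval=2)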

from scipy.stats import chi2

def get_chiSquare_distribution(dfree=4, cf=0.1):
    '''
    Look up the chi-square critical value for the given degrees of
    freedom and significance level.
    params:
        dfree: degrees of freedom, maximum number of bins - 1, default 4
        cf: significance level, default 10%
    return:
        chi-square threshold
    '''
    percents = [0.95, 0.90, 0.5, 0.1, 0.05, 0.025, 0.01, 0.005]
    df = pd.DataFrame(np.array([chi2.isf(percents, df=i) for i in range(1, 30)]))
    df.columns = percents
    df.index = df.index + 1
    # limit the displayed precision to three decimal places
    pd.set_option('display.precision', 3)
    return df.loc[dfree, cf]
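For reference, the table lookup above is equivalent to calling the inverse survival function of the chi-square distribution directly:

from scipy.stats import chi2

# critical value for 4 degrees of freedom at the 10% significance level
threshold = chi2.isf(0.1, df=4)  # approximately 7.779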

def merge_chiSquare(chi_result, index, mergeIndex, a='expected_target_cnt',
                    b='act_target_cnt', c='chi_square'):
    '''
    params:
        chi_result: chi-square DataFrame to merge
        index: row to be merged (and dropped)
        mergeIndex: row that absorbs the merged counts
        a, b, c: columns to merge
    return:
        chi-square DataFrame after merging the two intervals
    '''
    # accumulate the expected and actual bad counts into the surviving row
    chi_result.loc[mergeIndex, a] = chi_result.loc[mergeIndex, a] + chi_result.loc[index, a]
    chi_result.loc[mergeIndex, b] = chi_result.loc[mergeIndex, b] + chi_result.loc[index, b]
    # recompute the chi-square statistic for the merged interval
    chi_result.loc[mergeIndex, c] = (chi_result.loc[mergeIndex, b] - chi_result.loc[mergeIndex, a])**2 / chi_result.loc[mergeIndex, a]
    chi_result = chi_result.drop([index])
    chi_result = chi_result.reset_index(drop=True)
    return chi_result

# bin_col is the list of feature columns to bin;
# exp_f_data_label_dr is the modeling sample with a binary 'label' column
for col in bin_col:
    chi_stats = calc_chiSquare(exp_f_data_label_dr, col, 'label')
    chiMerge_result, split_list = chiMerge_maxInterval(chi_stats, col, maxInterval=5)
    print(col, 'might be split at:', split_list)
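To apply one of the proposed splits, the cutpoints can be passed to pd.cut. A minimal sketch, assuming df is the modeling DataFrame and col/split_list come from the last loop iteration above (deduplicating the cutpoints and widening the top edge to infinity are assumptions, not part of the original code):

# deduplicate and sort the cutpoints, then widen the top edge so the
# column maximum still falls into the last interval
edges = sorted(set(split_list)) + [np.inf]
df[col + '_bin'] = pd.cut(df[col], bins=edges, right=False)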


Source: http://outofmemory.cn/yw/11567170.html