# Contingency table of counts; margins=True appends row/column totals as 'All'.
ct = pd.crosstab(label, feature, margins=True)
# Row-wise proportions: divide each row by its row total (the 'All' column).
# FIX: the original divided an undefined `contingency_table`; the count table
# just built above (`ct`) is what should be normalized.
ct_prob = ct.div(ct['All'], axis=0)
# Chi-squared test of independence on the contingency table.
# FIX: run the test once and index the result, instead of calling
# chi2_contingency twice on the same table as the original did.
chi2_result = scipy.stats.chi2_contingency(cross_table)
p_value = chi2_result[1]     # p-value of the independence test
chi2_stat = chi2_result[0]   # chi^2 test statistic
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Univariate feature selection: keep the 2 features with the highest
# chi-squared score against the target.
skb = SelectKBest(chi2, k=2)
# NOTE(review): SelectKBest.fit expects (X, y); here `cols` is presumably the
# feature matrix and `feat` the target labels — confirm, the names suggest the
# opposite and a swap would silently select the wrong columns.
skb = skb.fit(cols, feat)
# Boolean mask over the input columns: True = feature was selected.
skb.get_support()
# One-way ANOVA: model `value` as a function of the categorical `treatments`
# column of an_df (C(...) forces categorical treatment of the variable).
model = ols('value ~ C(treatments)', data=an_df).fit()
# Type-II ANOVA table (sum of squares, F statistic, p-value per term).
anova_table = anova_lm(model, typ = 2)
anova_table
from sklearn.preprocessing import LabelEncoder
# Integer-encode every string (object-dtype) column of `data` in place.
le = LabelEncoder()
for col in data.columns:
    if data[col].dtype == 'object':
        # fit_transform refits the encoder on each column, so the same single
        # encoder instance is safely reused; its learned mapping afterwards
        # reflects only the last column processed.
        data[col] = le.fit_transform(data[col])
需要注意 LabelEncoder 的结果带有数字大小含义(2 大于 1,3 大于 2),会引入本不存在的顺序关系,因此对于无序的离散型(名义型)特征不建议使用这种方法。
from sklearn.preprocessing import OneHotEncoder
# One-hot encode all columns of all_Feats in a single fit-and-transform step.
onehot_encoder = OneHotEncoder()
# NOTE: sklearn's OneHotEncoder returns a sparse matrix by default.
onehot_encoded = onehot_encoder.fit_transform(all_Feats)
# FIX: the original wrote `import pandas` but then used the `pd` alias,
# which would raise NameError; bind the conventional alias.
import pandas as pd

# One-hot encode every categorical column of the whole DataFrame at once.
encoded = pd.get_dummies(X)
# One-hot encode a single column; new columns are named '<col>_<category>'.
oh = pd.get_dummies(X[col], prefix=col)
import xgboost
from xgboost import XGBClassifier
from xgboost import plot_importance
from sklearn import metrics
# FIX: train_test_split was used below but never imported.
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Hold out 20% of the encoded feature matrix for evaluation.
train_X, test_X, train_y, test_y = train_test_split(encoded_x, y, test_size=0.2)

# eta is xgboost's native alias for the learning rate.
xgbc = XGBClassifier(eta=0.2, max_depth=6)
xgbc.fit(train_X, train_y)

# Accuracy on the held-out split.
test_predict = xgbc.predict(test_X)
metrics.accuracy_score(test_y, test_predict)

# Feature-importance bar chart; tall figure so many feature names stay legible.
ax = plot_importance(xgbc)
ax.figure.set_size_inches(8, 36)
plt.show()
import lightgbm as lgb
import matplotlib.pyplot as plt
# NOTE(review): plot_importance is imported from xgboost but never used in this
# cell — looks like a copy-paste leftover; kept because later cells may use it.
from xgboost import plot_importance
from sklearn import metrics

# Wrap the train/validation splits in LightGBM's native Dataset format.
train_data = lgb.Dataset(train_X, label = train_y)
validation_data = lgb.Dataset(test_X, label = test_y)

# Multiclass objective over 8 classes, with L1/L2 regularization.
params={
    'learning_rate':0.1,
    'lambda_l1':0.1,
    'lambda_l2':0.2,
    'max_depth':6,
    'objective':'multiclass',
    'num_class':8,
}
clf = lgb.train(params, train_data, valid_sets=[validation_data])

# predict returns per-class probabilities; take the argmax as the class label.
y_prob = clf.predict(test_X, num_iteration=clf.best_iteration)
y_pred = [list(x).index(max(x)) for x in y_prob]
metrics.accuracy_score(test_y, y_pred)

# Horizontal bar chart of split-based feature importances, least to most important.
columns = test_X.columns.tolist()
df = pd.DataFrame()
df['feature name'] = columns
df['importance'] = clf.feature_importance()
df = df.sort_values('importance')
df.plot.barh(x = 'feature name',figsize=(10,36))
先安装SHAP:
!pip install shap
以xgboost模型为例:
import shap

# Explain the fitted xgboost classifier (xgbc) on the held-out set.
explainer = shap.TreeExplainer(xgbc)
shap_values = explainer.shap_values(test_X)
# Bar-style summary: mean |SHAP value| per feature, i.e. global importance.
shap.summary_plot(shap_values, test_X, plot_type="bar")