
Lab 4: Decision Tree Algorithm and Application


Assignment Information

| Item | Details |
| --- | --- |
| Assignment requirements | Assignment requirements |
| Assignment objectives | Assignment objectives |
| Name | 张海龙 |
| Student ID | 3180701120 |

Lab Objectives
1. Understand the principles of the decision tree algorithm and master the decision tree algorithm framework;
2. Understand feature selection, tree generation, and tree pruning in decision tree learning;
3. Be able to choose an appropriate decision tree algorithm for different data types;
4. Be able to apply the decision tree algorithm to solve practical problems for specific application scenarios and data.

Lab Content
1. Design and implement methods for entropy, empirical conditional entropy, and information gain (the definitions used are given after this list).
2. Implement the ID3 algorithm.
3. Get familiar with the decision tree algorithms in the sklearn library.
4. Apply sklearn's decision tree algorithm to predict classes on the iris dataset.
5. Apply the self-implemented decision tree algorithm to predict classes on the iris dataset.
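
For reference, the standard definitions behind item 1 (empirical entropy, empirical conditional entropy, and information gain, as used by ID3 for feature selection) are:

$$H(D) = -\sum_{k=1}^{K} \frac{|C_k|}{|D|} \log_2 \frac{|C_k|}{|D|}, \qquad H(D \mid A) = \sum_{i=1}^{n} \frac{|D_i|}{|D|} H(D_i), \qquad g(D, A) = H(D) - H(D \mid A)$$

where $C_k$ are the classes in $D$ and $D_i$ are the subsets of $D$ induced by the values of feature $A$.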

Lab Report Requirements
1. Following the lab content, write up the experimental procedure, algorithms, and test results;
2. Keep the code standardized: naming conventions and comments;
3. Analyze the complexity of the core algorithm;
4. Consult the literature and discuss application scenarios of the ID3 and C4.5 algorithms;
5. Consult the literature and analyze decision tree pruning strategies.

Lab Code

1.
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from collections import Counter
import math
from math import log
import pprint
```

2.
```python
def create_data():
    datasets = [['青年', '否', '否', '一般', '否'],
                ['青年', '否', '否', '好', '否'],
                ['青年', '是', '否', '好', '是'],
                ['青年', '是', '是', '一般', '是'],
                ['青年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '好', '否'],
                ['中年', '是', '是', '好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '好', '是'],
                ['老年', '是', '否', '好', '是'],
                ['老年', '是', '否', '非常好', '是'],
                ['老年', '否', '否', '一般', '否'],
                ]
    labels = [u'年龄', u'有工作', u'有自己的房子', u'信贷情况', u'类别']
    # return the dataset and the name of each column
    return datasets, labels
```

3.
```python
datasets, labels = create_data()
```

4.
```python
train_data = pd.DataFrame(datasets, columns=labels)
```

5.
```python
train_data
```

6.
```python
# empirical entropy H(D)
def calc_ent(datasets):
    data_length = len(datasets)
    label_count = {}
    for i in range(data_length):
        label = datasets[i][-1]
        if label not in label_count:
            label_count[label] = 0
        label_count[label] += 1
    ent = -sum([(p / data_length) * log(p / data_length, 2)
                for p in label_count.values()])
    return ent


def entropy(y):
    """
    Entropy of a label sequence (integer-encoded)
    """
    hist = np.bincount(y)
    ps = hist / np.sum(hist)
    return -np.sum([p * np.log2(p) for p in ps if p > 0])


# empirical conditional entropy H(D|A)
def cond_ent(datasets, axis=0):
    data_length = len(datasets)
    feature_sets = {}
    for i in range(data_length):
        feature = datasets[i][axis]
        if feature not in feature_sets:
            feature_sets[feature] = []
        feature_sets[feature].append(datasets[i])
    cond_ent = sum(
        [(len(p) / data_length) * calc_ent(p) for p in feature_sets.values()])
    return cond_ent


# information gain g(D, A) = H(D) - H(D|A)
def info_gain(ent, cond_ent):
    return ent - cond_ent


def info_gain_train(datasets):
    count = len(datasets[0]) - 1
    ent = calc_ent(datasets)
    best_feature = []
    for c in range(count):
        c_info_gain = info_gain(ent, cond_ent(datasets, axis=c))
        best_feature.append((c, c_info_gain))
        print('特征({}) - info_gain - {:.3f}'.format(labels[c], c_info_gain))
    # pick the feature with the largest information gain
    best_ = max(best_feature, key=lambda x: x[-1])
    return '特征({})的信息增益最大,选择为根节点特征'.format(labels[best_[0]])
```

7.

```python
info_gain_train(np.array(datasets))
```
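
As a quick check on what this call should print: the loan dataset above has 9 positive ('是') and 6 negative ('否') examples, so its empirical entropy works out to

$$H(D) = -\frac{9}{15}\log_2\frac{9}{15} - \frac{6}{15}\log_2\frac{6}{15} \approx 0.971,$$

and the largest information gain (about 0.420) is obtained for 有自己的房子, which is therefore chosen as the root feature.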

8.

```python
# define the node class of the tree
class Node:
    def __init__(self, root=True, label=None, feature_name=None, feature=None):
        self.root = root
        self.label = label
        self.feature_name = feature_name
        self.feature = feature
        self.tree = {}
        self.result = {
            'label:': self.label,
            'feature': self.feature,
            'tree': self.tree
        }

    def __repr__(self):
        return '{}'.format(self.result)

    def add_node(self, val, node):
        self.tree[val] = node

    def predict(self, features):
        if self.root is True:
            return self.label
        return self.tree[features[self.feature]].predict(features)


class DTree:
    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon
        self._tree = {}

    # empirical entropy H(D)
    @staticmethod
    def calc_ent(datasets):
        data_length = len(datasets)
        label_count = {}
        for i in range(data_length):
            label = datasets[i][-1]
            if label not in label_count:
                label_count[label] = 0
            label_count[label] += 1
        ent = -sum([(p / data_length) * log(p / data_length, 2)
                    for p in label_count.values()])
        return ent

    # empirical conditional entropy H(D|A)
    def cond_ent(self, datasets, axis=0):
        data_length = len(datasets)
        feature_sets = {}
        for i in range(data_length):
            feature = datasets[i][axis]
            if feature not in feature_sets:
                feature_sets[feature] = []
            feature_sets[feature].append(datasets[i])
        cond_ent = sum([(len(p) / data_length) * self.calc_ent(p)
                        for p in feature_sets.values()])
        return cond_ent

    # information gain
    @staticmethod
    def info_gain(ent, cond_ent):
        return ent - cond_ent

    def info_gain_train(self, datasets):
        count = len(datasets[0]) - 1
        ent = self.calc_ent(datasets)
        best_feature = []
        for c in range(count):
            c_info_gain = self.info_gain(ent, self.cond_ent(datasets, axis=c))
            best_feature.append((c, c_info_gain))
        # pick the feature with the largest information gain
        best_ = max(best_feature, key=lambda x: x[-1])
        return best_

    def train(self, train_data):
        """
        input: dataset D (a DataFrame), feature set A, threshold eta
        output: decision tree T
        """
        _, y_train, features = (train_data.iloc[:, :-1],
                                train_data.iloc[:, -1],
                                train_data.columns[:-1])

        # 1. if all instances in D belong to the same class Ck, T is a single-node
        #    tree; label the node with Ck and return T
        if len(y_train.value_counts()) == 1:
            return Node(root=True, label=y_train.iloc[0])

        # 2. if A is empty, T is a single-node tree; label the node with the
        #    majority class Ck in D and return T
        if len(features) == 0:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])

        # 3. compute the information gains as in Algorithm 5.1; Ag is the feature
        #    with the largest information gain
        max_feature, max_info_gain = self.info_gain_train(np.array(train_data))
        max_feature_name = features[max_feature]

        # 4. if the information gain of Ag is below the threshold eta, T is a
        #    single-node tree; label the node with the majority class Ck in D and return T
        if max_info_gain < self.epsilon:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])

        # 5. split D into subsets according to the values of Ag
        node_tree = Node(
            root=False, feature_name=max_feature_name, feature=max_feature)
        feature_list = train_data[max_feature_name].value_counts().index
        for f in feature_list:
            sub_train_df = train_data.loc[train_data[max_feature_name] ==
                                          f].drop([max_feature_name], axis=1)

            # 6. build the subtrees recursively
            sub_tree = self.train(sub_train_df)
            node_tree.add_node(f, sub_tree)

        pprint.pprint(node_tree.tree)
        return node_tree

    def fit(self, train_data):
        self._tree = self.train(train_data)
        return self._tree

    def predict(self, X_test):
        return self._tree.predict(X_test)
```
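
Regarding report requirement 3, a rough complexity estimate: each level of the tree scans every remaining sample once per candidate feature, so with $n$ samples and $m$ features training costs on the order of $O(n \cdot m^2)$ in the worst case, while a single prediction only walks one root-to-leaf path, i.e. $O(\text{depth})$.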

9.

```python
datasets, labels = create_data()
data_df = pd.DataFrame(datasets, columns=labels)
dt = DTree()
tree = dt.fit(data_df)
```

10.

```python
tree
```

11.

```python
dt.predict(['老年', '否', '否', '一般'])
```
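
For this query the learned tree first tests 有自己的房子 (here '否') and then 有工作 (also '否'), so the call should return '否', matching the ID3 result for this loan dataset.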

12.

```python
# data: take the first 100 iris samples (two classes) and the first two features
def create_data():
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = [
        'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
    ]
    data = np.array(df.iloc[:100, [0, 1, -1]])
    print(data)
    return data[:, :2], data[:, -1]


X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
```
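
A small optional tweak (not in the original code): fixing the random seed makes the split, and therefore the score reported below, reproducible.

```python
# hypothetical variant of the split above with a fixed seed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
```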

13.

```python
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import graphviz
```

14.

```python
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
```

15.

```python
clf.score(X_test, y_test)
```
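
Beyond the accuracy score, a couple of optional checks (a sketch reusing the `clf` fitted above) can make the result easier to discuss in the report:

```python
print(clf.feature_importances_)             # relative importance of sepal length / sepal width
print(clf.get_depth(), clf.get_n_leaves())  # size of the learned tree
```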

16.

```python
# export_graphviz writes DOT source; despite the .pdf name, the file contains DOT text
tree_pic = export_graphviz(clf, out_file="mytree.pdf")
with open('mytree.pdf') as f:
    dot_graph = f.read()
```

17.

```python
graphviz.Source(dot_graph)
```
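
Rendering with graphviz requires the Graphviz system binaries. If they are not available, sklearn's built-in plotting is a workable alternative (a sketch, not part of the original lab code):

```python
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 6))
plot_tree(clf, feature_names=['sepal length', 'sepal width'], filled=True)
plt.show()
```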

18.

```python
from sklearn.tree import DecisionTreeClassifier
from sklearn import preprocessing
import numpy as np
import pandas as pd
from sklearn import tree
import graphviz

features = ["年龄", "有工作", "有自己的房子", "信贷情况"]
X_train = pd.DataFrame([
    ["青年", "否", "否", "一般"],
    ["青年", "否", "否", "好"],
    ["青年", "是", "否", "好"],
    ["青年", "是", "是", "一般"],
    ["青年", "否", "否", "一般"],
    ["中年", "否", "否", "一般"],
    ["中年", "否", "否", "好"],
    ["中年", "是", "是", "好"],
    ["中年", "否", "是", "非常好"],
    ["中年", "否", "是", "非常好"],
    ["老年", "否", "是", "非常好"],
    ["老年", "否", "是", "好"],
    ["老年", "是", "否", "好"],
    ["老年", "是", "否", "非常好"],
    ["老年", "否", "否", "一般"]
])
y_train = pd.DataFrame(["否", "否", "是", "是", "否",
                        "否", "否", "是", "是", "是",
                        "是", "是", "是", "是", "否"])

# preprocessing: encode the categorical strings as integers
le_x = preprocessing.LabelEncoder()
le_x.fit(np.unique(X_train))
X_train = X_train.apply(le_x.transform)
le_y = preprocessing.LabelEncoder()
le_y.fit(np.unique(y_train))
y_train = y_train.apply(le_y.transform)

# fit a sklearn decision tree on the encoded data
model_tree = DecisionTreeClassifier()
model_tree.fit(X_train, y_train)

# visualization
dot_data = tree.export_graphviz(model_tree, out_file=None,
                                feature_names=features,
                                class_names=[str(k) for k in np.unique(y_train)],
                                filled=True, rounded=True,
                                special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
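
As a hypothetical follow-up (not part of the original post), the fitted model can classify a new applicant by reusing the same encoders:

```python
# encode a new sample with le_x, predict, then decode the label with le_y
sample = pd.DataFrame([["老年", "否", "否", "一般"]])
pred = model_tree.predict(sample.apply(le_x.transform))
print(le_y.inverse_transform(pred))  # prints the original "是"/"否" label
```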
Lab Screenshots

(Output screenshots for steps 1 to 16; images not included.)
Lab Summary
After this lab, I understand the principles of decision trees and can apply the decision tree algorithm to practical problems: I can choose an appropriate decision tree algorithm for different data types and apply it to specific application scenarios and data. Decision trees have low computational complexity, produce results that are easy to interpret, are insensitive to missing intermediate values, and can handle irrelevant features; their main drawback is that they are prone to overfitting.
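
One common way to counter the overfitting mentioned above is to restrict tree growth. A minimal sketch with sklearn, reusing the encoded X_train / y_train from item 18 (the parameter values are illustrative, not tuned for this lab):

```python
from sklearn.tree import DecisionTreeClassifier

# pre-pruning via depth / leaf-size limits plus cost-complexity pruning (ccp_alpha)
pruned = DecisionTreeClassifier(max_depth=3, min_samples_leaf=2, ccp_alpha=0.01)
pruned.fit(X_train, y_train)
print(pruned.get_depth(), pruned.get_n_leaves())
```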
