ML&DL(수정 중)
트리 알고리즘-2
nimij
2022. 3. 22. 01:10
In [3]:
import pandas as pd
wine = pd.read_csv('https://bit.ly/wine-date')
In [5]:
data = wine[['alcohol','sugar','pH']].to_numpy()
target = wine['class'].to_numpy()
In [6]:
from sklearn.model_selection import train_test_split
train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42)
In [7]:
sub_input, val_input, sub_target, val_target = train_test_split(train_input, train_target, test_size=0.2, random_state=42)
In [8]:
print(sub_input.shape,val_input.shape)
(4157, 3) (1040, 3)
In [9]:
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state =42)
dt.fit(sub_input,sub_target)
print(dt.score(sub_input,sub_target))
print(dt.score(val_input,val_target))
0.9971133028626413
0.864423076923077
교차검증¶
In [10]:
from sklearn.model_selection import cross_validate
scores = cross_validate(dt,train_input,train_target)
print(scores)
{'fit_time': array([0.00778031, 0.00723457, 0.0083878 , 0.00834703, 0.00738025]), 'score_time': array([0.00083661, 0.00065994, 0.00096488, 0.00071311, 0.00075006]), 'test_score': array([0.86923077, 0.84615385, 0.87680462, 0.84889317, 0.83541867])}
In [11]:
import numpy as np
print(np.mean(scores['test_score']))
0.855300214703487
In [12]:
from sklearn.model_selection import StratifiedKFold
scores = cross_validate(dt,train_input,train_target,cv=StratifiedKFold())
print(np.mean(scores['test_score']))
0.855300214703487
In [13]:
#10-폴드 교차검증
splitter = StratifiedKFold(n_splits=10,shuffle=True,random_state=42)
scores = cross_validate(dt,train_input,train_target,cv=splitter)
print(np.mean(scores['test_score']))
0.8574181117533719
In [14]:
from sklearn.model_selection import GridSearchCV
params = {'min_impurity_decrease':[0.0001,0.0002,0.0003,0.0004,0.0005]}
In [15]:
gs = GridSearchCV(DecisionTreeClassifier(random_state=42),params, n_jobs=-1)
In [17]:
gs.fit(train_input,train_target)
Out[17]:
GridSearchCV(cv=None, error_score=nan,
estimator=DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=None,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
presort='deprecated',
random_state=42,
splitter='best'),
iid='deprecated', n_jobs=-1,
param_grid={'min_impurity_decrease': [0.0001, 0.0002, 0.0003,
0.0004, 0.0005]},
pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
scoring=None, verbose=0)
In [19]:
dt = gs.best_estimator_
print(dt.score(train_input,train_target))
0.9615162593804117
In [20]:
print(gs.best_params_)
{'min_impurity_decrease': 0.0001}
In [21]:
print(gs.cv_results_['mean_test_score'])
[0.86819297 0.86453617 0.86492226 0.86780891 0.86761605]
In [23]:
best_index = np.argmax(gs.cv_results_['mean_test_score'])
print(gs.cv_results_['params'][best_index])
{'min_impurity_decrease': 0.0001}
In [25]:
params = {'min_impurity_decrease':np.arange(0.0001,0.001,0.0001),'max_depth':range(5,20,1),'min_samples_split':range(2,100,10)}
In [26]:
gs = GridSearchCV(DecisionTreeClassifier(random_state=42),params, n_jobs=-1)
gs.fit(train_input,train_target)
Out[26]:
GridSearchCV(cv=None, error_score=nan,
estimator=DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=None,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
presort='deprecated',
random_state=42,
splitter='best'),
iid='deprecated', n_jobs=-1,
param_grid={'max_depth': range(5, 20),
'min_impurity_decrease': array([0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008,
0.0009]),
'min_samples_split': range(2, 100, 10)},
pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
scoring=None, verbose=0)
In [27]:
print(gs.best_params_)
{'max_depth': 14, 'min_impurity_decrease': 0.0004, 'min_samples_split': 12}
In [28]:
print(np.max(gs.cv_results_['mean_test_score']))
0.8683865773302731
랜덤서치¶
In [29]:
from scipy.stats import uniform, randint
In [30]:
rgen = randint(0,10)
rgen.rvs(10)
Out[30]:
array([1, 1, 4, 0, 7, 2, 1, 2, 8, 2])
In [31]:
np.unique(rgen.rvs(1000), return_counts=True)
Out[31]:
(array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
array([ 88, 94, 75, 110, 108, 105, 112, 100, 104, 104]))
In [32]:
ugen = uniform(0,1)
ugen.rvs(10)
Out[32]:
array([0.68725777, 0.89379914, 0.68522298, 0.30310734, 0.51969805,
0.45535335, 0.44037421, 0.91468916, 0.32236632, 0.92493381])
In [37]:
params = {'min_impurity_decrease':uniform(0.0001,0.001),'max_depth':randint(20,50),'min_samples_split':randint(2,25),'min_samples_leaf':randint(1,25)}
In [38]:
from sklearn.model_selection import RandomizedSearchCV
gs = RandomizedSearchCV(DecisionTreeClassifier(random_state=42), params, n_iter=100,n_jobs=-1,random_state=42)
gs.fit(train_input,train_target)
Out[38]:
RandomizedSearchCV(cv=None, error_score=nan,
estimator=DecisionTreeClassifier(ccp_alpha=0.0,
class_weight=None,
criterion='gini',
max_depth=None,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
presort='deprecated',
random_state=42,
splitter='best'),...
'min_impurity_decrease': <scipy.stats._distn_infrastructure.rv_frozen object at 0x7f74668af1d0>,
'min_samples_leaf': <scipy.stats._distn_infrastructure.rv_frozen object at 0x7f746689f250>,
'min_samples_split': <scipy.stats._distn_infrastructure.rv_frozen object at 0x7f7466a93a10>},
pre_dispatch='2*n_jobs', random_state=42, refit=True,
return_train_score=False, scoring=None, verbose=0)
In [39]:
print(gs.best_params_)
{'max_depth': 39, 'min_impurity_decrease': 0.00034102546602601173, 'min_samples_leaf': 7, 'min_samples_split': 13}
In [40]:
print(np.mean(gs.cv_results_['mean_test_score']))
0.8639124620567113
In [41]:
dt = gs.best_estimator_
print(dt.score(test_input,test_target))
0.86
In [ ]:
출처 : 혼자 공부하는 머신러닝+딥러닝 github/박해선