Machine Learning_ML_Ensemble Learning
The idea behind ensemble methods is to combine different classifiers into one ensemble classifier whose performance is better than any of the individual classifiers that make it up — much like the saying that three cobblers together can beat Zhuge Liang (many heads are better than one)!
The most basic ensemble method is majority voting: the final prediction is the class label predicted by the majority of the classifiers.
In the multiclass case this is called plurality voting: the label that receives the most votes wins.
Implementing the ensemble error (a binomial probability mass function)
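Assuming the n base classifiers make independent errors at the same rate ε, the probability that the majority vote is wrong is the probability that at least ⌈n/2⌉ of them are wrong:

P_{ens} = \sum_{k=\lceil n/2 \rceil}^{n} \binom{n}{k} \varepsilon^{k} (1-\varepsilon)^{n-k}

The function below simply sums these binomial terms.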
from scipy.special import comb  # scipy.misc.comb was removed in newer SciPy versions
import math

def ensemble_error(n_classifier, error):
    # probability that at least ceil(n/2) of the base classifiers are wrong
    k_start = int(math.ceil(n_classifier / 2.0))
    probs = [comb(n_classifier, k) * error**k * (1 - error)**(n_classifier - k)
             for k in range(k_start, n_classifier + 1)]
    return sum(probs)

ensemble_error(n_classifier=11, error=0.25)
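With 11 base classifiers and a base error rate of 0.25 this comes out to roughly 0.034, far below the individual error rate. A small sketch (assuming matplotlib is available) compares the two over the whole range of base error rates:

import numpy as np
import matplotlib.pyplot as plt

# ensemble error vs. individual base error for 11 base classifiers
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=err) for err in error_range]
plt.plot(error_range, ens_errors, label='Ensemble error', linewidth=2)
plt.plot(error_range, error_range, linestyle='--', label='Base error', linewidth=2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc='upper left')
plt.grid()
plt.show()

The ensemble only helps while the base error stays below 0.5 (better than random guessing); above that it is actually worse than a single classifier.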
Comparing the classifiers' votes
Suppose all classifiers carry equal weight and there are two class labels {0, 1}. If three classifiers predict (0, 1, 1), the majority vote result is 1; if they predict (0, 0, 1), the result is 0 (see the short numpy check below).
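A minimal numpy check of both cases (np.bincount counts the votes per label, np.argmax picks the winner):

import numpy as np
print(np.argmax(np.bincount([0, 1, 1])))  # -> 1
print(np.argmax(np.bincount([0, 0, 1])))  # -> 0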
But once we introduce weights, things change.
Suppose the three classifiers have weights 0.2, 0.2 and 0.6.
For the predictions (0, 0, 1) the result is now 1, because the weight 0.6 counts three times as much as 0.2,
so the vote is effectively counted as (0, 0, 1, 1, 1).
And if, instead of the hard predictions, we use the class probabilities each classifier assigns,
the outcome can change yet again.
Weighted vote on class labels
import numpy as np
np.argmax(np.bincount([0, 0, 1], weights=[0.2, 0.2, 0.6]))  # -> 1
Weighted vote on class probabilities
# each row holds one classifier's [P(class 0), P(class 1)];
# the third classifier favors class 1, as in the (0, 0, 1) example above
ex = np.array([[0.9, 0.1], [0.8, 0.2], [0.4, 0.6]])
p = np.average(ex, axis=0, weights=[0.2, 0.2, 0.6])
p             # -> array([0.58, 0.42])
np.argmax(p)  # -> 0, the opposite of the weighted label vote
Implementing a MajorityVoteClassifier
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator
# note: plain dict.items() is used below instead of sklearn.externals.six,
# which was removed from newer scikit-learn versions
class MajorityVoteClassifier(BaseEstimator, ClassifierMixin):
    """A majority-vote ensemble classifier.

    Parameters
    ----------
    classifiers : array-like
        the base classifiers of the ensemble
    vote : str, {'classlabel', 'probability'}
        vote on the predicted class labels, or on the weighted sum of the
        class probabilities computed by each classifier
    weights : array-like, optional
        the weight assigned to each base classifier
    """
    def __init__(self, classifiers, vote='classlabel', weights=None):
        self.classifiers = classifiers
        self.named_classifiers = {key: value for key, value
                                  in _name_estimators(classifiers)}
        self.vote = vote
        self.weights = weights

    def fit(self, X, y):
        """Fit the base classifiers.

        Parameters
        ----------
        X : training samples
        y : target labels of the training samples

        Returns
        -------
        self : object
        """
        # raise an error if vote is not one of the supported options
        if self.vote not in ('probability', 'classlabel'):
            raise ValueError("vote must be 'probability' or 'classlabel'"
                             "; got (vote=%r)" % self.vote)
        # raise an error if the number of weights does not match the number of classifiers
        if self.weights and len(self.weights) != len(self.classifiers):
            raise ValueError('Number of classifiers and weights must be equal'
                             '; got %d weights, %d classifiers'
                             % (len(self.weights), len(self.classifiers)))
        # use LabelEncoder to make sure the class labels start at 0
        self.lablenc_ = LabelEncoder()
        self.lablenc_.fit(y)
        self.classes_ = self.lablenc_.classes_
        self.classifiers_ = []
        for clf in self.classifiers:
            fitted_clf = clone(clf).fit(X, self.lablenc_.transform(y))
            self.classifiers_.append(fitted_clf)
        return self

    def predict(self, X):
        """Predict class labels for X.

        Parameters
        ----------
        X : array-like, feature matrix

        Returns
        -------
        maj_vote : array-like, predicted class labels
        """
        if self.vote == 'probability':
            maj_vote = np.argmax(self.predict_proba(X), axis=1)
        else:  # 'classlabel' vote
            predictions = np.asarray([clf.predict(X)
                                      for clf in self.classifiers_]).T
            maj_vote = np.apply_along_axis(lambda x:
                                           np.argmax(np.bincount(x, weights=self.weights)),
                                           axis=1,
                                           arr=predictions)
        maj_vote = self.lablenc_.inverse_transform(maj_vote)
        return maj_vote

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like, feature matrix

        Returns
        -------
        avg_proba : array-like, weighted average probability for each class
        """
        probas = np.asarray([clf.predict_proba(X)
                             for clf in self.classifiers_])
        avg_proba = np.average(probas, axis=0, weights=self.weights)
        return avg_proba

    def get_params(self, deep=True):
        """Expose the base classifiers' parameters so GridSearchCV can tune them."""
        if not deep:
            return super(MajorityVoteClassifier, self).get_params(deep=False)
        else:
            out = self.named_classifiers.copy()
            for name, step in self.named_classifiers.items():
                for key, value in step.get_params(deep=True).items():
                    out['%s__%s' % (name, key)] = value
            return out
Testing it out
Loading the Iris data
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()  # load the built-in Iris dataset
X, y = iris.data[50:, [1, 2]], iris.target[50:]  # two features, two classes (versicolor vs. virginica)
le = LabelEncoder()
y = le.fit_transform(y)
Splitting the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1)
Pick three classifiers: logistic regression, a decision tree, and a k-nearest-neighbors classifier
from sklearn.model_selection import cross_val_score  # sklearn.cross_validation is deprecated
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
clf1 = LogisticRegression(penalty='l2',
                          C=0.001,
                          random_state=0)
clf2 = DecisionTreeClassifier(max_depth=1,
                              criterion='entropy',
                              random_state=0)
clf3 = KNeighborsClassifier(n_neighbors=1,
                            p=2,
                            metric='minkowski')
pipe1 = Pipeline([['sc', StandardScaler()],
                  ['clf', clf1]])
pipe2 = Pipeline([['sc', StandardScaler()],
                  ['clf', clf3]])
clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']
print('10-fold cross validation:\n')
for clf, label in zip([pipe1, clf2, pipe2], clf_labels):
    scores = cross_val_score(estimator=clf,
                             X=X_train,
                             y=y_train,
                             cv=10,
                             scoring='roc_auc')
    print("ROC AUC: %0.2f (+/- %0.2f) [%s]"
          % (scores.mean(), scores.std(), label))
Before adding the ensemble, the individual classifiers score roughly 0.92 to 0.93 average ROC AUC in 10-fold cross-validation.
Adding the ensemble classifier
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe2])
clf_labels += ['Majority voting']  # add one more label
all_clf = [pipe1, clf2, pipe2, mv_clf]
for clf, label in zip(all_clf, clf_labels):
    scores = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring='roc_auc')
    print("ROC AUC: %0.2f (+/- %0.2f) [%s]"
          % (scores.mean(), scores.std(), label))
Now the average ROC AUC rises to about 0.97, and notably we only used two features!
Evaluating with ROC curves
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt
colors = ['black', 'orange', 'blue', 'green']
linestyles = [':', '--', '-.', '-']
for clf, label, clr, ls in zip(all_clf, clf_labels, colors, linestyles):
    # use the probability of the positive class (label 1) as the score
    y_pred = clf.fit(X_train, y_train).predict_proba(X_test)[:, 1]
    fpr, tpr, thresholds = roc_curve(y_true=y_test, y_score=y_pred)
    roc_auc = auc(x=fpr, y=tpr)
    plt.plot(fpr, tpr, color=clr, linestyle=ls,
             label='%s (auc = %0.2f)' % (label, roc_auc))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
predict_proba: returns the predicted class probabilities (one column per class label)
predict: returns the predicted class labels
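As a quick illustrative check, a minimal sketch that reuses mv_clf and the train/test split from above:

mv_clf.fit(X_train, y_train)
print(mv_clf.predict(X_test)[:5])        # hard class labels
print(mv_clf.predict_proba(X_test)[:5])  # per-class probabilities, one column per label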
Brute-force tuning with grid search
mv_clf.get_params()  # list all tunable parameter names
from sklearn.model_selection import GridSearchCV  # sklearn.grid_search is deprecated
params = {'decisiontreeclassifier__max_depth': [1,2],
'pipeline-2__clf__n_neighbors': [1,2,3],
'pipeline-1__clf__penalty': ['l1','l2'],
'pipeline-1__clf__C': [0.001, 0.1, 1.0, 10, 100.0]}
grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring='roc_auc')
grid.fit(X_train, y_train)
# model_selection's GridSearchCV exposes results via cv_results_ (the old grid_scores_ is gone)
for mean_score, std_score, params in zip(grid.cv_results_['mean_test_score'],
                                         grid.cv_results_['std_test_score'],
                                         grid.cv_results_['params']):
    print("%0.3f +/- %0.2f %r" % (mean_score, std_score / 2, params))
grid.best_params_
grid.best_score_
# GridSearchCV refits the best model by default (refit=True), so it can be used on the test set directly
y_pred = grid.predict(X_test)
# compute the accuracy with metrics.accuracy_score
from sklearn import metrics
accuracy = metrics.accuracy_score(y_test, y_pred)
print(accuracy)
# the best classifier can also be accessed directly
grid.best_estimator_.classifiers
mv_clf = grid.best_estimator_
mv_clf.set_params(**grid.best_estimator_.get_params())
At this point we find that the best ensemble found by the grid search uses a tree depth of 1 and a regularization parameter of 1.0!
The majority-vote approach built so far is sometimes mentioned together with stacking; stacking proper, however, trains a meta-classifier, typically a logistic regression, on the predictions of the individual classifiers.
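For comparison, a minimal stacking sketch, assuming a scikit-learn version (0.22 or later) that ships StackingClassifier, and reusing pipe1, clf2 and pipe2 from above:

from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression

# a logistic regression meta-classifier learns how to combine the base models' predictions
stack = StackingClassifier(estimators=[('lr', pipe1), ('dt', clf2), ('knn', pipe2)],
                           final_estimator=LogisticRegression(),
                           cv=10)
stack.fit(X_train, y_train)
print(stack.score(X_test, y_test))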
Testing bagging instead (Bagging Predictors)
Loading the Wine dataset
import pandas as pd
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
# take a quick look at the data - a good habit
df_wine.head()
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
'Proline']
# this example only looks at class labels 2 and 3, so drop class 1
df_wine = df_wine[df_wine['Class label'] !=1]
y = df_wine['Class label'].values
X = df_wine[['Alcohol', 'Hue']].values
# split the dataset 60/40
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1)
# import BaggingClassifier
# use 500 unpruned decision trees as base classifiers
from sklearn.ensemble import BaggingClassifier
tree = DecisionTreeClassifier(criterion='entropy', max_depth=None)
bag = BaggingClassifier(base_estimator=tree,
                        n_estimators=500,
                        max_samples=1.0,
                        max_features=1.0,
                        bootstrap=True,
                        bootstrap_features=False,
                        n_jobs=1,
                        random_state=1)
from sklearn.metrics import accuracy_score
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision Tree train/test accuracies %.3f/%.3f' % (tree_train, tree_test))
bag = bag.fit(X_train, y_train)
y_train_pred = bag.predict(X_train)
y_test_pred = bag.predict(X_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
print('Bagging train/test accuracies %.3f/%.3f' % (bag_train, bag_test))
Comparing the two, the bagging model seems to generalize a little better to the test data!
Boosting weak learners with adaptive boosting
The most common implementation is AdaBoost (Adaptive Boosting). In boosting, the ensemble is built from very simple base classifiers, also known as weak learners.
The key idea behind boosting is to focus on the training samples that are hard to classify, in other words to let the weak learners learn from their mistakes!
The original boosting procedure draws random subsets of the training data without replacement, whereas bagging samples with replacement (see the small sketch below).
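A tiny numpy illustration of that difference (the variable names here are made up for the example):

import numpy as np

rng = np.random.RandomState(1)
idx = np.arange(10)
print(rng.choice(idx, size=10, replace=True))   # bootstrap sample (bagging): duplicates allowed
print(rng.choice(idx, size=7, replace=False))   # subset without replacement (original boosting)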
A single-level decision tree (a decision stump) is a typical weak learner. The original boosting procedure trains the weak learners on subsets drawn without replacement and combines them by majority vote; AdaBoost instead works with the complete training set, and in each iteration it re-weights the training samples according to the errors of the weak learner from the previous round, so that an increasingly strong combined classifier is built. A rough sketch of this re-weighting step follows below.
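As an illustration only, a minimal sketch of one AdaBoost re-weighting round, assuming binary labels; the helper name adaboost_round is made up and this is not the scikit-learn implementation:

import numpy as np

def adaboost_round(y_true, y_pred, sample_weights):
    # weighted error rate of the current weak learner
    miss = (y_pred != y_true).astype(float)
    err = np.sum(sample_weights * miss) / np.sum(sample_weights)
    # coefficient of this learner: accurate learners get a bigger say in the final vote
    alpha = 0.5 * np.log((1.0 - err) / err)
    # boost the weights of misclassified samples, shrink the rest, then renormalize
    new_weights = sample_weights * np.exp(alpha * np.where(miss == 1.0, 1.0, -1.0))
    return alpha, new_weights / new_weights.sum()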
Implementation in scikit-learn
from sklearn.ensemble import AdaBoostClassifier
tree = DecisionTreeClassifier(criterion='entropy',
                              max_depth=1)
ada = AdaBoostClassifier(base_estimator=tree,
                         n_estimators=500,
                         learning_rate=0.1,
                         random_state=0)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision Tree train/test accuracies %.3f/%.3f' % (tree_train, tree_test))
ada = ada.fit(X_train, y_train)
y_train_pred = ada.predict(X_train)
y_test_pred = ada.predict(X_test)
ada_train = accuracy_score(y_train, y_train_pred)
ada_test = accuracy_score(y_test, y_test_pred)
print('AdaBoost train/test accuracies %.3f/%.3f' % (ada_train, ada_test))
We can see that AdaBoost predicts all of the training data perfectly and also generalizes somewhat better to the unseen test data than a single decision stump!
Repeatedly reusing the same test set for model selection is bad practice, though. And while ensemble learning can clearly improve predictive performance, whether the extra computational cost is worth it is another question!