# Regression toy data: each target is the first coordinate of its feature pair.
# Feature pairs step by 3; the final pair [21, 23] breaks the +3 pattern —
# NOTE(review): possibly a typo for [21, 24], kept as-is to preserve behavior.
x = [[v, v + 3] for v in range(3, 19, 3)] + [[21, 23]]
y = list(range(3, 22, 3))
# Held-out evaluation pairs stepping by 2; targets again mirror the first column.
x1 = [[v, v + 2] for v in range(2, 11, 2)]
y1 = list(range(2, 11, 2))
# Classification: load the iris dataset and split it into train/test sets.
from sklearn import datasets
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
iris_x = iris.data    # feature matrix, shape (150, 4)
iris_y = iris.target  # class labels (0, 1, 2)
# 70% train / 30% test split. A fixed random_state makes the split — and
# therefore every downstream model score computed from it — reproducible
# run-to-run; without it, results differ on every execution.
x_train, x_test, y_train, y_test = train_test_split(
    iris_x, iris_y, test_size=0.3, random_state=0
)
#adaboost 自適應提升算法
# #分類
# from sklearn.ensemble import AdaBoostClassifier
# adaboost = AdaBoostClassifier ()
# adaboost.fit(x_train,y_train)
# print(adaboost.score(x_test,y_test))
# print(adaboost.predict(x_test))
# #回歸
# from sklearn.ensemble import AdaBoostRegressor
# adaboost = AdaBoostRegressor ()
# adaboost.fit(x,y)
# print(adaboost.score(x1,y1))
# print(adaboost.predict(x1))
#GBDT 梯度提升決策樹
#分類
# from sklearn.ensemble import GradientBoostingClassifier
# grad = GradientBoostingClassifier ()
# grad.fit(x_train,y_train)
# print(grad.score(x_test,y_test))
# print(grad.predict(x_test))
#回歸
# from sklearn.ensemble import GradientBoostingRegressor
# grad = GradientBoostingRegressor ()
# grad.fit(x,y)
# print(grad.score(x1,y1))
# print(grad.predict(x1))
#bagging 隨機森林 (random forest)
# #分類
# from sklearn.ensemble import RandomForestClassifier
# bagging = RandomForestClassifier ()
# bagging.fit(x_train,y_train)
# print(bagging.score(x_test,y_test))
# print(bagging.predict(x_test))
# 回歸
# from sklearn.ensemble import RandomForestRegressor
# bagging = RandomForestRegressor ()
# bagging.fit(x,y)
# print(bagging.score(x1,y1))
# print(bagging.predict(x1))