from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
print('Training score:', clf.score(X_train, y_train))
y_pred = clf.predict(X_test)
print('Test accuracy:', accuracy_score(y_test, y_pred) * 100)
print('Winslet')
if clf.predict([winslet])[0] == 0:
    print('Died', '--->', max(clf.predict_proba([winslet])[0]))
else:
    print('Survived', '--->', max(clf.predict_proba([winslet])[0]))
print('DiCaprio')
if clf.predict([dicaprio])[0] == 0:
    print('Died', '--->', max(clf.predict_proba([dicaprio])[0]))
else:
    print('Survived', '--->', max(clf.predict_proba([dicaprio])[0]))
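# Note: predict_proba returns one probability per class, with columns ordered by
# clf.classes_, so max(...) above is simply the probability of the predicted class.
# A small equivalent sketch that indexes the predicted class explicitly
# (reuses the winslet vector defined earlier):
pred = clf.predict([winslet])[0]
proba = clf.predict_proba([winslet])[0]
print('P(predicted class) =', proba[list(clf.classes_).index(pred)])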
training_accuracy = []
test_accuracy = []
# try n_neighbors (k) from 1 to 29
k_settings = range(1, 30)
for k in k_settings:
    # build the model
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(X_train, y_train)
    # record training-set accuracy
    training_accuracy.append(clf.score(X_train, y_train))
    # record test-set accuracy
    test_accuracy.append(clf.score(X_test, y_test))
import matplotlib.pyplot as plt

plt.figure(figsize=[20, 10])
plt.plot(k_settings, training_accuracy, label="training_accuracy")
plt.plot(k_settings, test_accuracy, label="test_accuracy")
plt.ylabel("accuracy")
plt.xlabel("k")
plt.legend()
plt.show()
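# A minimal follow-up sketch (not part of the original): pick the k with the highest
# test-set accuracy from the curves above and refit the final model with it.
import numpy as np

best_k = k_settings[int(np.argmax(test_accuracy))]
print('best k =', best_k, '--->', max(test_accuracy))
clf = KNeighborsClassifier(n_neighbors=best_k)
clf.fit(X_train, y_train)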