Post list · View all (689)
bro's coding
from sklearn.base import BaseEstimator, ClassifierMixin class Myclassifier(BaseEstimator,ClassifierMixin): def __init__(self): # list every parameter the estimator needs in __init__ self.result=0 def fit(self,X,y): # fit takes only the X and y parameters # train the model here return self def predict(self,X): # predict takes only X pred_y=np.zeros(len(X))+self.result return pred_y The estimator can be used even without defining score and the like. from sklearn.base import BaseEstimator, TransformerMixin ,..
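
Below is a minimal runnable sketch of the pattern in that excerpt: a scikit-learn-compatible classifier that inherits BaseEstimator and ClassifierMixin. The class name MajorityClassifier and its majority-class rule are illustrative choices, not the post's exact code; the point is that fit stores state and returns self, predict only takes X, and ClassifierMixin supplies score for free.

```python
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score


class MajorityClassifier(BaseEstimator, ClassifierMixin):
    """Toy estimator: always predicts the class seen most often in fit."""

    def __init__(self):
        # every constructor parameter should be stored under the same name;
        # this illustrative estimator takes none
        pass

    def fit(self, X, y):
        # fit receives only X and y, learns something, and returns self
        values, counts = np.unique(y, return_counts=True)
        self.majority_ = values[np.argmax(counts)]
        return self

    def predict(self, X):
        # predict receives only X
        return np.full(len(X), self.majority_)


# ClassifierMixin provides score() (accuracy), so cross_val_score works
# without defining it on the class
iris = load_iris()
print(cross_val_score(MajorityClassifier(), iris.data, iris.target))
```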

import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_breast_cancer cancer = load_breast_cancer() X=cancer.data y=cancer.target plt.scatter(X[:,0],X[:,2]) plt.axis('equal') np.corrcoef(X[:,0],X[:,2]) array([[1. , 0.99785528], [0.99785528, 1. ]]) mat=np.corrcoef(X.T) mat plt.imshow(mat,vmin=-1,vmax=1,cmap='bwr') plt.colorbar() idx = [0, 2, 3, 12, 13, 20, 22, 23, 1, ..
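
A self-contained version of that snippet, filling in only plotting boilerplate: np.corrcoef on two strongly related features (mean radius and mean perimeter, columns 0 and 2) gives roughly 0.998, and transposing X before np.corrcoef yields the full 30x30 feature correlation matrix for the heatmap.

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()
X = cancer.data

# correlation of two highly related features (mean radius vs. mean perimeter)
print(np.corrcoef(X[:, 0], X[:, 2]))   # off-diagonal entries around 0.998

# np.corrcoef treats rows as variables, so transpose to correlate the 30 columns
mat = np.corrcoef(X.T)
plt.imshow(mat, vmin=-1, vmax=1, cmap='bwr')
plt.colorbar()
plt.show()
```
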
Used for convenience. import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split # prepare the data iris=load_iris() X_train,X_test,y_train,y_test=train_test_split(iris.data,iris.target) from sklearn.preprocessing import MinMaxScaler from sklearn.decomposition import..
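
The excerpt is cut off at the sklearn.decomposition import, so what follows is an assumption: that the "convenience" being referred to is chaining MinMaxScaler with PCA (and a final classifier) in a single Pipeline. The KNeighborsClassifier final step and n_components=2 below are illustrative choices, not taken from the post.

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsClassifier

# prepare the data
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0)

# one object that scales, projects to 2 components, then classifies;
# fitting the pipeline runs every step in order, which is the convenience
pipe = make_pipeline(MinMaxScaler(), PCA(n_components=2), KNeighborsClassifier())
pipe.fit(X_train, y_train)
print(pipe.score(X_test, y_test))
```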

Precision: of the samples predicted positive, how many are actually correct. Recall: of the samples that are actually positive, how many are predicted correctly. P (positive): the prediction is 1. N (negative): the prediction is 0. T (true): the prediction was right. F (false): the prediction was wrong. TN: predicted 0 and right. FN: predicted 0 but wrong. TP: predicted 1 and right. FP: predicted 1 but wrong. # TN FP # FN TP
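
To tie those definitions to numbers, here is a small sketch on made-up binary labels (not data from the post): the 2x2 confusion matrix is unpacked into TN, FP, FN, TP, and the library's precision_score and recall_score are checked against the formulas TP / (TP + FP) and TP / (TP + FN).

```python
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score

# hypothetical ground truth and predictions for a binary problem
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 0])
y_pred = np.array([0, 1, 0, 1, 1, 0, 1, 0])

# sklearn lays the matrix out as
# [[TN FP]
#  [FN TP]]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()

print(precision_score(y_true, y_pred), tp / (tp + fp))  # precision = TP / (TP + FP)
print(recall_score(y_true, y_pred), tp / (tp + fn))     # recall    = TP / (TP + FN)
```
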
P (positive): the prediction is 1. N (negative): the prediction is 0. T (true): the prediction was right. F (false): the prediction was wrong. TN: predicted 0 and right. FN: predicted 0 but wrong. TP: predicted 1 and right. FP: predicted 1 but wrong. # TN FP # FN TP from sklearn.metrics import confusion_matrix cmat=confusion_matrix(y_test,pred_y) cmat array([[15, 0, 0], [ 0, 10, 3], [ 0, 0, 10]], dtype=int64) relative to cell (0,0): [23, 0] [0, 15] relative to cell (1,1): [25, 0] [3, 10] relative to cell (2,2): [25, 3] [0, 10] - draw a line through the row and column of the chosen diagonal cell, and the numbers in that row are (1..
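
The per-class 2x2 views worked out above can also be produced directly with multilabel_confusion_matrix, which returns one [[TN, FP], [FN, TP]] block per class of a multiclass problem. The labels below are constructed only to reproduce the 3x3 matrix from the excerpt; they are not the post's y_test and pred_y.

```python
import numpy as np
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix

# illustrative labels chosen to reproduce the 3x3 matrix shown in the excerpt
y_true = np.repeat([0, 1, 1, 2], [15, 10, 3, 10])
y_pred = np.repeat([0, 1, 2, 2], [15, 10, 3, 10])

print(confusion_matrix(y_true, y_pred))
# [[15  0  0]
#  [ 0 10  3]
#  [ 0  0 10]]

# one [[TN FP], [FN TP]] block for class 0, 1 and 2 respectively:
# [[23 0] [0 15]], [[25 0] [3 10]], [[25 3] [0 10]]
print(multilabel_confusion_matrix(y_true, y_pred))
```
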
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_iris iris=load_iris() from sklearn.model_selection import cross_val_score from sklearn.linear_model import LogisticRegression # only one sample becomes the test data and all the others become the training data # LOOCV (leave one out cross validation) from sklearn.model_selection import LeaveOneOut loo=LeaveOneOut() scores=cross..
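
Completing the truncated call, a minimal LOOCV sketch: with LeaveOneOut every one of the 150 iris samples is the test set exactly once, so cross_val_score returns 150 scores that are each 0 or 1. The max_iter value is only there to silence convergence warnings and is not from the post.

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import LeaveOneOut, cross_val_score

iris = load_iris()

# each split holds out exactly one sample as the test set
loo = LeaveOneOut()
scores = cross_val_score(LogisticRegression(max_iter=1000),
                         iris.data, iris.target, cv=loo)

print(len(scores))    # 150 folds, one per sample
print(scores.mean())  # mean accuracy over all leave-one-out folds
```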

from sklearn.datasets import load_breast_cancer cancer=load_breast_cancer() score=[] from sklearn.model_selection import cross_val_score from sklearn.neighbors import KNeighborsClassifier model=KNeighborsClassifier() scores=cross_val_score(model,cancer.data,cancer.target) score.append(scores.mean()) from sklearn.linear_model import LogisticRegression model=LogisticRegression() scores=cross_val_s..
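
The pattern in that excerpt (run cross_val_score for one model, append the mean, repeat) collapses into a loop; a sketch along those lines is below. The particular model list and the max_iter / random_state settings are illustrative additions.

```python
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

cancer = load_breast_cancer()

models = [
    KNeighborsClassifier(),
    LogisticRegression(max_iter=5000),      # raised max_iter so the solver converges
    DecisionTreeClassifier(random_state=0),
]

# mean accuracy over the default (stratified) cross-validation for each model
score = []
for model in models:
    scores = cross_val_score(model, cancer.data, cancer.target)
    score.append(scores.mean())
    print(type(model).__name__, scores.mean())
```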

It preserves the class proportions of the original data. Because no shuffling is applied, the result is always the same. from sklearn.model_selection import StratifiedKFold kfold=StratifiedKFold(3) scores1=cross_val_score(LogisticRegression(),iris.data,iris.target,cv=kfold) scores1 # array([0.96078431, 0.92156863, 0.95833333])
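
A self-contained version of that run: StratifiedKFold(3) keeps roughly the original one-third-per-class ratio of iris in every fold, and since shuffle defaults to False the folds, and therefore the scores, come out the same on every run. The bincount loop just makes the preserved class balance visible; max_iter is my addition to avoid convergence warnings.

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, cross_val_score

iris = load_iris()

kfold = StratifiedKFold(n_splits=3)   # shuffle=False by default -> deterministic folds
scores1 = cross_val_score(LogisticRegression(max_iter=1000),
                          iris.data, iris.target, cv=kfold)
print(scores1)

# each test fold keeps close to the 50/50/50 class balance of the full data set
for _, test_idx in kfold.split(iris.data, iris.target):
    print(np.bincount(iris.target[test_idx]))
```
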
from sklearn.model_selection import KFold from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.datasets import load_iris iris=load_iris() kfold=KFold(3) # shuffle = False from sklearn.model_selection import KFold kfold=KFold(3) # shuffle = False scores1=cross_val_score(LogisticRegression(),iris.data,iris.target,cv=kfold) scores1 # a..
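
Completing the cut-off snippet: iris is stored sorted by class, so with KFold(3) and no shuffling each fold contains a single class and the model is always tested on a class it never saw during training, which drives every fold's accuracy to zero. Turning on shuffle (my addition, with an arbitrary random_state) mixes the classes into every fold.

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score

iris = load_iris()
model = LogisticRegression(max_iter=1000)

# without shuffling, fold 1 = class 0, fold 2 = class 1, fold 3 = class 2
kfold = KFold(n_splits=3)   # shuffle=False
print(cross_val_score(model, iris.data, iris.target, cv=kfold))  # [0. 0. 0.]

# shuffling before splitting puts all three classes into every fold
kfold = KFold(n_splits=3, shuffle=True, random_state=0)
print(cross_val_score(model, iris.data, iris.target, cv=kfold))
```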

Cross-validation - no shuffling is applied, and for classification the original class proportions are preserved (stratified) (both can be changed with options). import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_iris iris=load_iris() from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import cross_val_score model=DecisionTreeClassifier(max_features=2) scores..
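
A short sketch of the defaults described above: when the estimator is a classifier and no explicit cv object is passed, cross_val_score uses a StratifiedKFold without shuffling; check_cv exposes which splitter would be chosen. The random_state on the tree is my addition for reproducibility.

```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, check_cv

iris = load_iris()
model = DecisionTreeClassifier(max_features=2, random_state=0)

# default cv for a classifier: stratified k-fold, shuffle=False
scores = cross_val_score(model, iris.data, iris.target)
print(scores)

# check_cv shows the splitter cross_val_score builds for the default cv=5
print(check_cv(5, iris.target, classifier=True))
```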