일 | 월 | 화 | 수 | 목 | 금 | 토 |
---|---|---|---|---|---|---|
 |  |  |  |  | 1 | 2 |
3 | 4 | 5 | 6 | 7 | 8 | 9 |
10 | 11 | 12 | 13 | 14 | 15 | 16 |
17 | 18 | 19 | 20 | 21 | 22 | 23 |
24 | 25 | 26 | 27 | 28 | 29 | 30 |
- 재귀함수
- vscode
- inorder
- CES 2021 참여
- 머신러닝
- web 용어
- KNeighborsClassifier
- classification
- 결합전문기관
- tensorflow
- 자료구조
- 데이터
- mglearn
- C언어
- CES 2021 참가
- bccard
- pycharm
- 웹 용어
- web
- postorder
- java역사
- paragraph
- html
- web 사진
- cudnn
- 데이터전문기관
- broscoding
- discrete_scatter
- Keras
- web 개발
- Today
- Total
목록[AI] (189)
bro's coding
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_iris iris=load_iris() from sklearn.model_selection import cross_val_score from sklearn.linear_model import LogisticRegression # 하나의 sample만 test data가 되고 나머지는 train data가 된다. # LOOCV(leave one out cross validation) from sklearn.model_selection import LeaveOneOut loo=LeaveOneOut() scores=cross..
from sklearn.datasets import load_breast_cancer cancer=load_breast_cancer() score=[] from sklearn.model_selection import cross_val_score from sklearn.neighbors import KNeighborsClassifier model=KNeighborsClassifier() scores=cross_val_score(model,cancer.data,cancer.target) score.append(scores.mean()) from sklearn.linear_model import LogisticRegression model=LogisticRegression() scores=cross_val_s..
원본 DATA의 비율을 유지한다. 셔플을 하지 않았기 때문에 결과값은 항상 같다 from sklearn.model_selection import StratifiedKFold kfold=StratifiedKFold(3) socres1=cross_val_score(LogisticRegression(),iris.data,iris.target,cv=kfold) socres1 # array([0.96078431, 0.92156863, 0.95833333])
from sklearn.model_selection import KFold from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.datasets import load_iris iris=load_iris() kfold=KFold(3) # shuffle = False from sklearn.model_selection import KFold kfold=KFold(3) # shuffle = False socres1=cross_val_score(LogisticRegression(),iris.data,iris.target,cv=kfold) socres1 # a..
교차검증 - shuffle을 적용하지 않고, 분류의 경우 원본 비율을 유지(stratified)(옵션으로 바꿀 수 있음) import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_iris iris=load_iris() from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import cross_val_score model=DecisionTreeClassifier(max_features=2) scores..
from sklearn.manifold import TSNE import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_digits mnist=load_digits() tsne =TSNE() mnist_tsne=tsne.fit_transform(mnist.data) # 상대적인 거리를 이용해 가까운 것들은 더 가깝게, 먼 것들은 더 멀게 만든다. xmax, ymax = mnist_tsne.max(axis=0) xmin, ymin = mnist_tsne.min(axis=0) colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525", "#A83683", "#4..
from sklearn.neighbors import KNeighborsClassifier knn=KNeighborsClassifier(5) knn.fit(mnist.data,mnist.target) knn.score(mnist.data,mnist.target) # 0.9905397885364496
pca=PCA(2) X_pca=pca.fit_transform(mnist.data) plt.figure(figsize=[10,8]) plt.scatter(X_pca[:,0],X_pca[:,1],c=mnist.target) plt.colorbar() xmax,ymax=X_pca.max(axis=0) xmin,ymin=X_pca.min(axis=0) colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525", "#A83683", "#4E655E", "#853541", "#3A3120", "#535D8E"] plt.figure(figsize=[14,14]) plt.xlim([xmin,xmax]) plt.ylim([xmin,xmax]) for i in ra..
import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_digits mnist=load_digits() display(mnist.keys()) display(dir(mnist)) plt.figure(figsize=[10,10]) for i in range(100): plt.subplot(10,10,i+1) plt.imshow(mnist.images[i],cmap='gray_r') plt.axis('off')
https://broscoding.tistory.com/175 sklearn.datasets.fetch_lfw_people people=fetch_lfw_people(min_faces_per_person=20,resize=0.7) dir(people) people.target_names array(['Alejandro Toledo', 'Alvaro Uribe', 'Amelie Mauresmo', 'Andre Agassi', 'Angelina Jolie', 'Ariel Sha.. broscoding.tistory.com from sklearn.decomposition import PCA pca=PCA(100) pca.fit(X_people) X_pca=pca.transform(X_people) pca_co..