일 | 월 | 화 | 수 | 목 | 금 | 토 |
---|---|---|---|---|---|---|
1 | | | | | | |
2 | 3 | 4 | 5 | 6 | 7 | 8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 |
16 | 17 | 18 | 19 | 20 | 21 | 22 |
23 | 24 | 25 | 26 | 27 | 28 |
- html
- mglearn
- 데이터전문기관
- vscode
- postorder
- KNeighborsClassifier
- discrete_scatter
- web 사진
- web 용어
- tensorflow
- paragraph
- CES 2021 참여
- classification
- 머신러닝
- inorder
- web
- bccard
- cudnn
- C언어
- pycharm
- broscoding
- 자료구조
- 데이터
- java역사
- 웹 용어
- 재귀함수
- CES 2021 참가
- web 개발
- 결합전문기관
- Keras
- Today
- Total
목록[AI]/python.sklearn (95)
bro's coding
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/cxchZ2/btqDk7ib5bN/kVftFAhKdoWcIdmuqV7KSk/img.png)
def sigmoid(x): return 1/(1+np.exp(-x)) xxx=np.arange(-15,15,0.01) yyy=sigmoid(xxx) plt.plot(xxx,yyy) yyy=sigmoid(xxx*0.5) plt.plot(xxx,yyy) yyy=sigmoid(xxx*10) plt.plot(xxx,yyy) lim(x->inf)=1, lim(x->-inf)=0, sigmoid(0)=0.5 확률 값을 계산 할 때 사용 신경망에서 가장 중요한 함수 함수의 성격이 매우 중요 p86
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/bCICLj/btqDpfrFbNP/6AuSbOjy3NT89e26zsOBN0/img.png)
import numpy as np import matplotlib.pyplot as plt # data 준비 from sklearn.datasets import make_blobs X,y=make_blobs(400,2,[[0,0],[5,5]],[2,3]) https://broscoding.tistory.com/128 머신러닝.datasets .make_blobs 사용하기 from sklearn.datasets import make_blobs X,y=make_blobs(400,2,[[0,0],[5,5]],[2,3]) # 400 : 행의 갯수 # 2 : 속성의 갯수 2개(축)(전부 X값임) # 중심점의 위치 # 각 중심점에 대한 편차 2, 3 plt.scatter(X[:.. broscoding.tistory..
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/pjfrJ/btqDnxthtI8/QSAV6aknVMkqP90iIUaCgK/img.png)
from sklearn.datasets import make_blobs X,y=make_blobs(400,2,[[0,0],[5,5]],[2,3]) # 400 : 행의 갯수 # 2 : 속성의 갯수 2개(축)(전부 X값임) # 중심점의 위치 # 각 중심점에 대한 편차 2, 3 plt.scatter(X[:,0],X[:,1],c=y,s=60,alpha=0.3) plt.colorbar()
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/5lme0/btqDiFshYEu/h2TMUyLDnAy28h6lYVh9yK/img.png)
# 와인의 속성을 사용해서 점수 예측 import numpy as np import matplotlib.pyplot as plt wine=np.loadtxt('winequality-red.csv',skiprows=1,delimiter=';') # x=전체 속성값 X=wine[:,:-1] # y=와인 등급 y=wine[:,-1] # lienar regression from sklearn.linear_model import LinearRegression model=LinearRegression() model.fit(X,y) #result w=model.coef_ b=model.intercept_ print('w=',w) print('b=',b) ''' w= [ 2.49905527e-02 -1.08359026..
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/McFeP/btqDiFyJzY3/ItVpkxKpWBsYXYjFd9vpS1/img.png)
import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris # data 수집 iris=load_iris() # column 컨트롤 col1=2 col2=3 # 전체 data에 대한 scatterplot graph plt.scatter(iris.data[:,col1],iris.data[:,col2],c=iris.target,alpha=0.7) X=iris.data[:,[col1]] # 주의 : col1->[col1] y=iris.data[:,col2] # LinearRegression model = LinearRegression() # train model.fit(X,y) #predict pred_y=mo..
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/bea0Wx/btqDjffbqli/nfqBzzDLxSZLzv6E2GP541/img.png)
X=iris.data[50:,[0]] y=iris.data[50:,1] model=LinearRegression() model.fit(X,y) model.score(X,y) model.coef_, model.intercept_ # (array([0.27804192]), 1.1309015164752294) xxx=[X.min()-0.5,X.max()+0.5] yyy=model.coef_*xxx+model.intercept_ # 기울기 : w(weight)=coef_ ,절편 : b(ax+b)=intercept_ plt.scatter(X,y) plt.plot(xxx,yyy,'r:')
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/N4nfK/btqDj2l6z7x/blZ9VM2PUuMOtJosQnU8f1/img.png)
X=iris.data[:,:3] # 만약 X=iris.data[:,3] 이렇게 넣으면 차원이 맞지 않는다. # ex) error : x=[1,2,3] -> # sol1 ) X=[[1],[2],[3]] # sol2 ) X=iris.data[:,[2]] # sol3 ) X=iris.data[:,2].reshape(-1,1) # because : X는 2차원 형태여야 한다. y=iris.data[:,3] from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(X,y) from sklearn.linear_model import LinearRegression model=LinearRegres..
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/DzkCo/btqDiaL3BpI/1L8xlPY3JtQnOD1WbbVZLk/img.png)
# data 준비 X=iris.data y=iris.target # score 수집 scores=[] for i in range(4): col=(np.array([1,2,3])+i)%4 train_X,test_X,train_y,test_y=train_test_split(X[:,col],y) ''' print(col) [1 2 3] [2 3 0] [3 0 1] [0 1 2] ''' # model 설정(k=5) model=KNeighborsClassifier(5) # model 훈련 model.fit(train_X,train_y) # score 수집 scores.append(model.score(test_X,test_y)) # 시각적 표현 plt.plot(scores) plt.xticks([0,1,2,3])..
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/NdCx9/btqDk5JhS3r/hEHAy3BF7RT9X2FKrd8q7k/img.png)
https://broscoding.tistory.com/114 머신러닝.iris data 불러오기 import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris iris=load_iris() iris dir(iris) ['DESCR', 'data', 'feature_names', 'target', 'target_names'] iris.data.shape.. broscoding.tistory.com https://broscoding.tistory.com/115 머신러닝.테스트데이터 뽑기 from sklearn.model_selection import train_test_split X_train,X_test,y..
![](http://i1.daumcdn.net/thumb/C150x150/?fname=https://blog.kakaocdn.net/dn/cP27NV/btqDkyLFHDd/SoFFEfkfE2zljPDoZRkmHk/img.png)
# 속성 컨트롤 col1=3 col2=1 X=iris.data[:,[col1,col2]] y=iris.target test_scores=[] train_scores=[] index=range(1,30) X_train,X_test,y_train,y_test=train_test_split(X,y) for k in index: model=KNeighborsClassifier(n_neighbors=k) model.fit(X_train,y_train) test_scores.append(model.score(X_test,y_test)) train_scores.append(model.score(X_train,y_train)) #plt.title('K에 따른 score변화') plt.plot(index,test_sco..