We use a deep neural network to predict Boston housing prices. The goal of deep learning is to find a suitable function that maps inputs to the outputs we want. Deep learning is a research direction within machine learning; it aims to give machines a human-like ability to analyze and learn, so that they can recognize data such as text, images, and sound. In my view, the main thing that distinguishes deep learning from classical machine learning is the neuron.
Basic structure
Why activation functions are introduced
Types of activation functions (see the sketch just below)
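The outline above stops at the headings, so here is a minimal sketch of the point behind them (my own illustration, not taken from the original code): stacking Linear layers with no activation in between collapses into a single linear map, and Paddle's paddle.nn module provides the usual nonlinearities (ReLU, Sigmoid, Tanh) to break that linearity.

import paddle

# Two stacked Linear layers with no activation in between are still one linear map:
#   y = W2 (W1 x + b1) + b2 = (W2 W1) x + (W2 b1 + b2)
# Paddle exposes the common nonlinearities as layers in paddle.nn:
relu = paddle.nn.ReLU()        # max(0, x); a common default for hidden layers
sigmoid = paddle.nn.Sigmoid()  # squashes values into (0, 1)
tanh = paddle.nn.Tanh()        # squashes values into (-1, 1)

x = paddle.to_tensor([[-1.0, 0.0, 2.0]])
print(relu(x).numpy())     # [[0. 0. 2.]]
print(sigmoid(x).numpy())  # values in (0, 1)
print(tanh(x).numpy())     # values in (-1, 1)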
Neural networks can tackle many kinds of problems, for example classification, prediction, and regression. Here we single out two problem types (a minimal sketch contrasting them follows the list):
Classification
Prediction
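As a rough sketch of the difference (my own illustration, not from the original article), in a Paddle script the two types mostly differ in the output layer and the loss function: classification ends in one output per class trained with paddle.nn.CrossEntropyLoss, while a prediction/regression task such as the housing-price model below ends in a single output trained with paddle.nn.MSELoss.

import paddle

# Classification head: one score per class, trained with cross-entropy.
cls_head = paddle.nn.Linear(13, 3)           # 3 classes here is a made-up example
cls_loss = paddle.nn.CrossEntropyLoss()

# Prediction/regression head: a single continuous output, trained with MSE.
reg_head = paddle.nn.Linear(13, 1)
reg_loss = paddle.nn.MSELoss()

x = paddle.rand([4, 13])                     # dummy batch of 4 samples
cls_labels = paddle.to_tensor([0, 2, 1, 0])  # class indices
reg_labels = paddle.rand([4, 1])             # continuous targets

print(cls_loss(cls_head(x), cls_labels).numpy())
print(reg_loss(reg_head(x), reg_labels).numpy())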
We use the Boston housing dataset that ships with PaddlePaddle:
https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/text/UCIHousing_cn.html
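Before the full script, here is a quick sketch (assuming a PaddlePaddle 2.x installation) of what the dataset looks like: each sample has 13 input features and one target value (the house price), which is why the network below maps 13 inputs to 1 output.

import paddle

# The UCI Boston housing data ships with Paddle and is split into train/test modes.
train_dataset = paddle.text.datasets.UCIHousing(mode="train")
feature, label = train_dataset[0]
print(feature.shape, label.shape)  # expected: (13,) and (1,)

The complete script, covering plotting helpers, dataset loading, the network definition, training, and evaluation, follows.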
## Deep learning framework and helper imports
import paddle
import numpy as np
import os
import matplotlib.pyplot as plt
## Plotting helpers
Batch = 0
Batchs = []
all_train_accs = []

def draw_train_acc(Batchs, train_accs):
    # Plot training accuracy against the number of batches seen (not used for this regression task).
    title = "training accs"
    plt.title(title)
    plt.xlabel("batch")
    plt.ylabel("acc")
    plt.plot(Batchs, train_accs, color='green', label='training accs')
    plt.legend()
    plt.grid()
    plt.show()

all_train_loss = []

def draw_train_loss(Batchs, train_loss):
    # Plot the training loss curve against the number of batches seen.
    title = "training loss"
    plt.title(title)
    plt.xlabel("batch")
    plt.ylabel("loss")
    plt.plot(Batchs, train_loss, color='red', label='training loss')
    plt.legend()
    plt.grid()
    plt.show()

## Plot ground-truth prices against predicted prices
def draw_infer_result(ground_truths, infer_results):
    # Points on the diagonal y = x correspond to perfect predictions.
    title = 'Boston'
    plt.title(title)
    x = np.arange(1, 20)
    y = x
    plt.plot(x, y)
    plt.xlabel("ground truth")
    plt.ylabel("infer result")
    plt.scatter(ground_truths, infer_results, color='green', label='infer result')
    plt.grid()
    plt.show()
'''
Dataset loading
'''
train_dataset = paddle.text.datasets.UCIHousing(mode="train")
eval_dataset = paddle.text.datasets.UCIHousing(mode="test")
train_loader = paddle.io.DataLoader(train_dataset, batch_size=32, shuffle=True)
eval_loader = paddle.io.DataLoader(eval_dataset, batch_size=8, shuffle=False)
print(train_dataset[1])  # inspect one sample: 13 features and 1 label
'''
Core: network definition
'''
class MyDNN(paddle.nn.Layer):
    def __init__(self):
        super(MyDNN, self).__init__()
        # Fully connected layers: paddle.nn.Linear(in_features, out_features, weight_attr)
        # A single-layer version would be: self.linear1 = paddle.nn.Linear(13, 1, None)
        self.linear1 = paddle.nn.Linear(13, 32, None)
        self.linear2 = paddle.nn.Linear(32, 64, None)
        self.linear3 = paddle.nn.Linear(64, 32, None)
        self.linear4 = paddle.nn.Linear(32, 1, None)

    def forward(self, inputs):  # forward pass
        x = self.linear1(inputs)
        x = self.linear2(x)
        x = self.linear3(x)
        x = self.linear4(x)
        return x
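# Note: the layers in MyDNN above are stacked without any activation function, so the
# whole network is still just a linear map of the 13 inputs. Tying back to the
# activation-function outline at the start of the article, a variant with ReLU between
# the hidden layers might look like this (my own sketch, not part of the original code):
class MyDNNReLU(paddle.nn.Layer):
    def __init__(self):
        super(MyDNNReLU, self).__init__()
        self.linear1 = paddle.nn.Linear(13, 32)
        self.linear2 = paddle.nn.Linear(32, 64)
        self.linear3 = paddle.nn.Linear(64, 32)
        self.linear4 = paddle.nn.Linear(32, 1)
        self.relu = paddle.nn.ReLU()

    def forward(self, inputs):
        x = self.relu(self.linear1(inputs))
        x = self.relu(self.linear2(x))
        x = self.relu(self.linear3(x))
        return self.linear4(x)  # no activation on the output of a regression model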
'''
Training and evaluation
'''
## Instantiate the model and switch to training mode
model = MyDNN()
model.train()
mse_loss = paddle.nn.MSELoss()
opt = paddle.optimizer.SGD(learning_rate=0.001, parameters=model.parameters())
epochs_num = 100
for epoch in range(epochs_num):
    for batch_id, data in enumerate(train_loader):
        feature = data[0]
        label = data[1]
        predict = model(feature)         # forward pass
        loss = mse_loss(predict, label)  # mean squared error
        loss.backward()                  # back-propagate gradients
        opt.step()                       # update parameters
        opt.clear_grad()                 # reset gradients for the next batch
        if batch_id != 0 and batch_id % 10 == 0:
            Batch = Batch + 10
            Batchs.append(Batch)
            all_train_loss.append(float(loss))
            print("epoch:{}, step:{}, train_loss:{}".format(epoch, batch_id, float(loss)))
paddle.save(model.state_dict(), "UCIHousingDNN")
draw_train_loss(Batchs, all_train_loss)

## Reload the saved parameters and evaluate on the test split
para_state = paddle.load("UCIHousingDNN")
model = MyDNN()
model.set_state_dict(para_state)
model.eval()
losses = []
ground_truths = []
infer_results = []
for batch_id, data in enumerate(eval_loader):
    feature = data[0]
    label = data[1]
    predict = model(feature)
    loss = mse_loss(predict, label)
    losses.append(float(loss))
    ground_truths.append(label.numpy())
    infer_results.append(predict.numpy())
avg_loss = np.mean(losses)
print("average eval loss:", avg_loss)
draw_infer_result(np.concatenate(ground_truths), np.concatenate(infer_results))
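As a small follow-up (my own addition, reusing the objects defined above), a single test sample can be pushed through the reloaded model like this:

sample_feature, sample_label = eval_dataset[0]
pred = model(paddle.to_tensor(sample_feature).reshape([1, 13]))
print("predicted:", float(pred), "actual:", float(sample_label[0]))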