1. Convolutional Neural Networks
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
def load_dataset():
    # Read the SIGNS train/test sets and the class list from the HDF5 files
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # test set labels

    classes = np.array(test_dataset["list_classes"][:])  # list of class names

    # Reshape the labels into row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (m, Hi, Wi, Ci)
    Y -- true "label" vector, of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y) with the same permutation so pairs stay aligned
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: Partition (shuffled_X, shuffled_Y), minus the end case
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handle the end case (a last mini-batch smaller than mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches
def convert_to_one_hot(Y, C):
    # Select rows of the C x C identity matrix to one-hot encode the labels
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
def forward_propagation_for_predict(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3",
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = W1 @ X + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = W2 @ A1 + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = W3 @ A2 + b3

    return Z3
def predict(X, parameters):
    # Convert the trained numpy parameters to TensorFlow tensors
    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    # Placeholder for a single flattened 64x64x3 image (12288 = 64 * 64 * 3)
    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)  # index of the largest output, i.e. the predicted class

    sess = tf.Session()
    prediction = sess.run(p, feed_dict={x: X})

    return prediction
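
A quick usage sketch of the helpers above. The 255 normalization constant and the 6-class count follow the usual SIGNS-dataset convention and are assumptions, not stated in this file:

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
X_train = X_train_orig / 255.                    # scale pixel values to [0, 1]
Y_train = convert_to_one_hot(Y_train_orig, 6).T  # one-hot labels, shape (m, 6); 6 classes assumed
minibatches = random_mini_batches(X_train, Y_train, mini_batch_size=64, seed=0)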
1.1 Importing Packages
The required packages are the ones imported at the top of this post (math, numpy, h5py, matplotlib, tensorflow).
1.2 Zero-Padding
Zero-padding adds zeros around the border of an image; it is applied by calling a padding function, sketched below.
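
A minimal sketch of such a padding helper, built on np.pad; the name zero_pad and the NHWC layout are assumptions consistent with the helpers above:

def zero_pad(X, pad):
    # Pad only the height and width dimensions of an (m, n_H, n_W, n_C) batch with zeros
    X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)),
                   mode='constant', constant_values=(0, 0))
    return X_pad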
1.3 Single-Layer Convolutional Network
The core computation is the sum over the 9 cells of a 3x3 window, i.e., one small step of the convolutional network; see the sketch below.
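
A hedged sketch of that single step: element-wise multiply one slice of the input by the filter, sum all entries, then add the bias. The name conv_single_step is an assumption:

def conv_single_step(a_slice_prev, W, b):
    # a_slice_prev and W have the same shape, e.g. (3, 3, n_C_prev); b is scalar-like, shape (1, 1, 1)
    s = a_slice_prev * W  # element-wise product
    Z = np.sum(s)         # sum over the window (the "9 cells" per channel for a 3x3 filter)
    Z = Z + float(b)      # add the bias as a real number
    return Z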
1.4 Convolutional Neural Network - Forward Pass
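
As a sketch of what this section covers, here is a TF1-style forward pass of the shape CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED. The pool window sizes and the 6 output units are assumptions matching the SIGNS setting, and the function name is mine:

def forward_propagation_sketch(X, parameters):
    W1 = parameters['W1']
    W2 = parameters['W2']
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
    A1 = tf.nn.relu(Z1)
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='SAME')
    A2 = tf.nn.relu(Z2)
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
    F = tf.contrib.layers.flatten(P2)
    # 6 output units, no activation: the softmax is folded into the loss at training time
    Z3 = tf.contrib.layers.fully_connected(F, 6, activation_fn=None)
    return Z3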
1.5 Pooling Layer
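
A minimal NumPy sketch of forward pooling (the function name and defaults are assumptions): slide an f x f window with a given stride and keep the max, or the mean, of each window:

def pool_forward_sketch(A_prev, f=2, stride=2, mode="max"):
    (m, n_H_prev, n_W_prev, n_C) = A_prev.shape
    n_H = int((n_H_prev - f) / stride) + 1  # output height
    n_W = int((n_W_prev - f) / stride) + 1  # output width
    A = np.zeros((m, n_H, n_W, n_C))
    for i in range(m):
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start, horiz_start = h * stride, w * stride
                    a_slice = A_prev[i, vert_start:vert_start + f, horiz_start:horiz_start + f, c]
                    A[i, h, w, c] = np.max(a_slice) if mode == "max" else np.mean(a_slice)
    return A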
1.6 Backpropagation in Convolutional Neural Networks (OPTIONAL / UNGRADED)
1.6.1 Convolutional Layer Backward Pass
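
A hedged sketch of the convolutional backward pass: the loop structure mirrors the forward pass, and the three in-loop updates accumulate dA_prev, dW, and db from dZ. The function name and simplifications are mine:

def conv_backward_sketch(dZ, A_prev, W, stride=1, pad=0):
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    (f, f, n_C_prev, n_C) = W.shape
    (m, n_H, n_W, n_C) = dZ.shape

    dA_prev = np.zeros(A_prev.shape)
    dW = np.zeros(W.shape)
    db = np.zeros((1, 1, 1, n_C))

    A_prev_pad = np.pad(A_prev, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant')
    dA_prev_pad = np.pad(dA_prev, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant')

    for i in range(m):
        a_prev_pad = A_prev_pad[i]
        da_prev_pad = dA_prev_pad[i]  # view: updates below write into dA_prev_pad
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start, horiz_start = h * stride, w * stride
                    vert_end, horiz_end = vert_start + f, horiz_start + f
                    a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    # The three core gradient updates:
                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, c] * dZ[i, h, w, c]
                    dW[:, :, :, c] += a_slice * dZ[i, h, w, c]
                    db[:, :, :, c] += dZ[i, h, w, c]
        # Strip the padding back off when storing this example's gradient
        dA_prev[i] = da_prev_pad[pad:-pad, pad:-pad, :] if pad > 0 else da_prev_pad
    return dA_prev, dW, db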
1.6.2 Pooling Layer Backward Pass
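
Two helper sketches commonly used for pooling backward (names assumed): a mask that routes the gradient to the max entry for max pooling, and an even split of the gradient for average pooling:

def create_mask_from_window(x):
    # True at the position of the maximum of x, False elsewhere;
    # in max-pooling backward only the max entry receives the gradient
    mask = (x == np.max(x))
    return mask

def distribute_value(dz, shape):
    # Spread a scalar gradient dz evenly over a window of the given (n_H, n_W) shape,
    # since every entry contributed equally in average pooling
    (n_H, n_W) = shape
    average = dz / (n_H * n_W)
    a = np.ones(shape) * average
    return a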