Structure size change error in MATLAB Function block

Sepehr Saadatmand on 5 June 2019
Hello fellows,
I developed a neural network code as an m-file and I am trying to put it into a MATLAB Function block so I can use it in Simulink. The code is written based on structures. In this code I have functions where a variable needs to be assigned structures of different sizes. However, it seems that in Simulink you cannot change the size of a variable. I resolved that issue for some variables by using "coder.varsize('variable name')". But for a structure with matrix cells whose sizes change each time, I cannot do that. Here is my code; in the function "L_model_forward" I am trying to update the variable "cache" in a for loop with different structures. I am getting an error and I don't know how to fix it. Here is one of the associated errors: "Function 'Virtual Inertia/MATLAB Function3' (#617.1269.1362), line 78, column 5:
"[AL, caches] = L_model_forward(X, net, hidden_layers_activation_fn,last_layers_a"
Launch diagnostic report."
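For reference, the coder.varsize pattern that works for a plain variable looks roughly like the sketch below (the function name, variable name, and the 1x100 upper bound are only placeholders, not part of my model); this is the pattern I could not carry over to a structure whose cell fields change size:
function y = varsize_demo(n)   % placeholder example, not part of the model below
%#codegen
% Declare x as variable-size with an explicit upper bound (1x100 here),
% which Simulink generally requires for variable-size data in a MATLAB Function block.
coder.varsize('x', [1 100]);
x = zeros(1, 0);
for k = 1:min(n, 100)
    x = [x, k];   % x changes size on every iteration; coder.varsize allows this
end
y = sum(x);
end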
function [J,U] = fcn(J_o,states,E)
global w1 w2 w3 b1 b2 b3
U=1;
J=1;
gamma=0.9;
s_base=10e3;
states(1:4)=states(1:4)/s_base;
net=weight2net(w1,w2,w3,b1,b2,b3)
[U X]=states2inp(states,E)
y=(gamma*J_o)+U;
net=FF_online_sim(X, y, net)
% [w1,w2,w3,b1,b2,b3]=net2weight(net);
end
%% input to net conversion
function net=weight2net(w1,w2,w3,b1,b2,b3)
net.W={w1, w2, w3};
net.B={b1', b2', b3'};
end
function [w1,w2,w3,b1,b2,b3]=net2weight(net)
w1=net.W{1};
w2=net.W{2};
w3=net.W{3};
b1=net.B{1};
b2=net.B{2};
b3=net.B{3};
end
%% states to utility and input
function [U X]=states2inp(states,E)
s=states;
D_P=s(1)-s(2);
P=s(2);
D_Q=s(3)-s(4);
Q=s(4);
delta=s(5);
D_f=60-s(6);
U=S_Error(states);
X=[D_P P D_Q Q delta D_f E];
end
%% Utility
function e=S_Error(states)
p_set=states(1);
Q_set=states(3);
p=states(2);
Q=states(4);
d_w=states(6);
e=sqrt(1*((Q_set-Q)^2)+((p_set-p)^2));%+(1)*((8e18*d_w)^2));
end
%% Define the multi-layer model using all the helper functions we wrote before
function net=FF_online_sim(X, y, net_in)
% initialize parameters
net = net_in;
hidden_layers_activation_fn='sigmoid';
last_layers_activation_fn='lin';
learning_rate_s=.01;
% iterate over L-layers to get the final output and the cache
[AL, caches] = L_model_forward(X, net, hidden_layers_activation_fn,last_layers_activation_fn);
% compute cost to plot it
% % iterate over L-layers backward to get gradients
% grads = L_model_backward(AL, y, caches, hidden_layers_activation_fn,last_layers_activation_fn);
%
% % update parameters
% net = update_parameters(net, grads, learning_rate_s);
% %acc=accuracy(X, net, y, hidden_layers_activation_fn,last_layers_activation_fn);
end
%% Initialization network
function net=initialize_parameters(layers_dims)
L = length(layers_dims);
for j=1:L-1
    W{j}=(2*rand(layers_dims(j),layers_dims(j+1)))-1;
    B{j}=zeros(1,layers_dims(j+1));
end
net.W=W;
net.B=B;
end
%% Activation Function Definition
function [A, Z]=sigmoid(Z)
A = 1 ./ (1+exp(-Z));
end
function [A, Z]=tanhyp(Z)
A = tanh(Z);
end
function [A, Z]=relu(Z)
A = max(0, Z);
end
function [A, Z]=leaky_relu(Z)
A = max(0.1 * Z, Z);
end
function [A, Z]=lin(Z)
A = Z;
end
%% plotting the activation
function plot_activation()
Z=-10:.1:10;
figure;
subplot(2,2,1);
[o,~]=sigmoid(Z);
plot(Z,o);
title('sigmoid')
subplot(2,2,2);
[o,~]=tanhyp(Z);
plot(Z,o);
title('tanh')
subplot(2,2,3);
[o,~]=relu(Z);
plot(Z,o);
title('relu(Z)')
subplot(2,2,4);
[o,~]=leaky_relu(Z);
plot(Z,o);
title('leaky_relu(Z)')
end
%% Feed Forward
% Define helper functions that will be used in L-model forward prop
function [Z , cache]=linear_forward(A_prev, W, b)
Z = A_prev*W + b;
cache.A_prev=A_prev;
cache.W=W;
cache.b=b;
%cache = (A_prev, W, b)
end
function [A, cache]=linear_activation_forward(A_prev, W, b, activation_fn)
if isequal(activation_fn,'sigmoid')
    [Z, linear_cache] = linear_forward(A_prev, W, b);
    [A, activation_cache] = sigmoid(Z);
elseif isequal(activation_fn,'tanh')
    [Z, linear_cache] = linear_forward(A_prev, W, b);
    [A, activation_cache] = tanhyp(Z);
elseif isequal(activation_fn,'relu')
    [Z, linear_cache] = linear_forward(A_prev, W, b);
    [A, activation_cache] = relu(Z);
elseif isequal(activation_fn,'leaky_relu')
    [Z, linear_cache] = linear_forward(A_prev, W, b);
    [A, activation_cache] = leaky_relu(Z);
elseif isequal(activation_fn,'lin')
    [Z, linear_cache] = linear_forward(A_prev, W, b);
    [A, activation_cache] = lin(Z);
end
cache.linear_cache=linear_cache;
cache.activation_cache=activation_cache;
%cache = (linear_cache, activation_cache)
end
function [AL, caches]=L_model_forward(X, net, hidden_layers_activation_fn,last_layers_activation_fn)
% This is the function where the size-change error quoted above is raised:
% "cache" is reassigned in the loop with structures whose fields change size.
coder.extrinsic('delete')
coder.varsize('caches');
coder.varsize('cache');
coder.varsize('A_prev');
coder.varsize('A');
caches = [];
L = length(net.W);
%A_prev={X net.B{1} net.B{2} net.B{3}};
A=X;
for j=1:2%L-1
    % [A_prev{j+1}, cache] = linear_activation_forward(A_prev{j},...
    %     net.W{j}, net.B{j},hidden_layers_activation_fn);
    A_prev = A;
    [A, cache] = linear_activation_forward(A_prev, net.W{j},...
        net.B{j},hidden_layers_activation_fn)
    %cache
    %caches=[caches,cache]
end
AL=5
% [AL, cache] = linear_activation_forward(A_prev{j+1},net.W{L} , net.B{L},last_layers_activation_fn);
% caches=[caches cache];
%caches.append(cache)
%assert AL.shape == (1, X.shape[1])
end
%% Compute cross-entropy cost
function cost=compute_cost(AL, y)
[m ~] = size(y);
%cost = - (1/m) * sum(y.*log(AL)) + ((1 - y).*log(1 - AL)); % log cost from the Python code
cost = - sqrt((1/m) * sum((y-AL).^2)) ; % my own MSE
end
%% Backpropagation
function dZ=sigmoid_gradient(dA, Z)
[A, Z] = sigmoid(Z);
dZ = dA.*( A .* (1 - A)) ; % if this works, correct the rest
end
function dZ=tanhyp_gradient(dA, Z)
[A, Z] = tanhyp(Z);
dZ = dA.*(1 - (A.^2));
end
function dZ=relu_gradient(dA, Z)
[A, Z] = relu(Z);
dZ = dA.*(A > 0);
end
function dZ=lin_gradient(dA, Z)
[A, Z] = lin(Z);
dum=ones(size(A));
dZ = dA.*dum;
end
% define helper functions that will be used in L-model back-prop
function [dA_prev, dW, db]=linear_backword(dZ, cache)
A_prev=cache.A_prev;
W=cache.W;
b = cache.b;
[m ~] = size(A_prev);
dW = (1/m)* (A_prev'*dZ); %I am not sure
db = (1/m)*sum(dZ,1);%db = (1 ./ m) .* sum(dZ,1) %I am not sure
dA_prev = dZ*W'; %I am not sure
% assert dA_prev.shape == A_prev.shape
% assert dW.shape == W.shape
% assert db.shape == b.shape
end
function [dA_prev, dW, db]=linear_activation_backward(dA, cache, activation_fn)
linear_cache=cache.linear_cache;
activation_cache=cache.activation_cache;
%linear_cache, activation_cache = cache
if activation_fn == "sigmoid"
    dZ = sigmoid_gradient(dA, activation_cache);
    [dA_prev, dW, db] = linear_backword(dZ, linear_cache);
elseif activation_fn == "tanh"
    dZ = tanhyp_gradient(dA, activation_cache);
    [dA_prev, dW, db] = linear_backword(dZ, linear_cache);
elseif activation_fn == "relu"
    dZ = relu_gradient(dA, activation_cache);
    [dA_prev, dW, db] = linear_backword(dZ, linear_cache);
elseif activation_fn == "lin"
    dZ = lin_gradient(dA, activation_cache);
    [dA_prev, dW, db] = linear_backword(dZ, linear_cache);
end
end
function grads=L_model_backward(AL, y, caches, hidden_layers_activation_fn,last_layers_activation_fn)
y = reshape(y,size(AL));
L = length(caches);
grads = [];
%dAL = (AL - y)./(abs(AL).*(1 - AL)); % with the Python cross-entropy cost
dAL = (AL - y); % with squared-error cost
[grads.dA{L}, grads.dW{L}, grads.db{L}] = linear_activation_backward(dAL, caches(L), last_layers_activation_fn);
for j=L-1 :-1:1
    current_cache = caches(j);
    [grads.dA{j}, grads.dW{j}, grads.db{j}] = ...
        linear_activation_backward(grads.dA{j+1}, current_cache,hidden_layers_activation_fn);
end
end
%% Update Parameters
function net=update_parameters(net_in, grads, learning_rate)
net=net_in;
L = length(net.W);
for j=1: L
    net.W{j}= net.W{j}- learning_rate * grads.dW{j};
    net.B{j}= net.B{j}- learning_rate * grads.db{j};
end
end
%% Define the multi-layer model using all the helper functions we wrote before
function net=L_layer_model(X, y, net_in, learning_rate, num_iterations,hidden_layers_activation_fn,last_layers_activation_fn)
% initialize parameters
net = initialize_parameters(net_in);
% initialize cost list
cost_list = [];
% iterate over num_iterations
RMS_Error=zeros(1,num_iterations);
learning_rate_s=linspace(learning_rate,0,num_iterations);
for i=1:num_iterations
    % iterate over L-layers to get the final output and the cache
    [AL, caches] = L_model_forward(X(i,:), net, hidden_layers_activation_fn,last_layers_activation_fn);
    % compute cost to plot it
    cost = compute_cost(AL, y(i));
    % iterate over L-layers backward to get gradients
    grads = L_model_backward(AL, y(i), caches, hidden_layers_activation_fn,last_layers_activation_fn);
    % update parameters
    net = update_parameters(net, grads, learning_rate_s(i));
    RMS_Error(i)=er_RMS(X(i,:), net, y(i), hidden_layers_activation_fn,last_layers_activation_fn);
    % append each 100th cost to the cost list
    if rem((i + 1), 100) == 0
        %fprintf("The cost after %d iterations is: %f}",i,cost)
    end
    if rem(i, 100) == 0
        %cost_list.append(cost)
    end
end
acc=accuracy(X, net, y, hidden_layers_activation_fn,last_layers_activation_fn)
figure;
x_p=1:i;
plot(x_p,RMS_Error)
%RMS_Error=er_RMS(X, net, y, hidden_layers_activation_fn)
% # plot the cost curve
% plt.figure(figsize=(10, 6))
% plt.plot(cost_list)
% plt.xlabel("Iterations (per hundreds)")
% plt.ylabel("Loss")
% plt.title(f"Loss curve for the learning rate = {learning_rate}")
end
function o=accuracy(X, net, y, activation_fn,last_layers_activation_fn)
[probs, caches] = L_model_forward(X, net, activation_fn,last_layers_activation_fn);
labels = (probs >= 0.5) * 1;
o = mean(labels == y) * 100;
end
function o=er_RMS(X, net, y, activation_fn,last_layers_activation_fn)
[probs, caches] = L_model_forward(X, net, activation_fn,last_layers_activation_fn);
o = sqrt(mean((probs-y).^2)) ;
end

Answers (0)
