Error using dlnetwork/forward (line 580)
Layer 'sequence': Invalid input data. Invalid size of channel dimension. Layer expects
input with channel dimension size 2 but received input with size 392.
Error in classify_video_sequences_full_network_v1>modelGradients (line 472)
[dlYPred,state] = forward(dlnet,dlX);
Error in deep.internal.dlfeval (line 17)
[varargout{1:nargout}] = fun(x{:});
Error in deep.internal.dlfevalWithNestingCheck (line 19)
[varargout{1:nargout}] = deep.internal.dlfeval(fun,varargin{:});
Error in dlfeval (line 31)
[varargout{1:nargout}] = deep.internal.dlfevalWithNestingCheck(fun,varargin{:});
Error in classify_video_sequences_full_network_v1 (line 207)
[gradients,state,loss] = dlfeval(@modelGradients,dlnet,dlX,dlY);
% Sequence-classification network: BiLSTM over per-frame feature vectors,
% keeping only the final time step, then a fully connected + softmax head.
% (The enclosing "layersLSTM = [ ... ]" brackets are outside this excerpt.)
%
% NOTE(review): the dlnetwork/forward error above reports that this input
% layer expects channel size 2 but the mini-batches arrive with channel
% size 392. The input size [1024 2] (spatial 1024, channel 2) does not
% match what preprocessLabeledSequences emits under the 'SCTB' mini-batch
% format -- confirm the actual feature dimensions of the data and make
% the sequenceInputLayer size and the 'SCTB' format string agree with them.
sequenceInputLayer([1024 2],'Name','sequence')
bilstmLayer(2000,'OutputMode','last','Name','bilstm')
dropoutLayer(0.5,'Name','drop')
fullyConnectedLayer(numClasses,'Name','fc')
softmaxLayer('Name','softmax')
% Wrap the layer array in a dlnetwork for use with dlfeval in a custom
% training loop (rather than trainNetwork).
dlnet = dlnetwork(layersLSTM);
% Enable the live training-progress plot below.
plots = "training-progress";
% Mini-batch queue over the training datastore. 'SCTB' labels the
% predictor dimensions Spatial-Channel-Time-Batch; '' leaves the class
% responses unformatted.
% NOTE(review): the channel size this format assigns to the data must
% match the sequenceInputLayer's expected channel size -- the forward()
% error above shows they currently differ (expected 2, received 392).
mbq = minibatchqueue(dsTrain,...
'MiniBatchSize',miniBatchSize,...
'MiniBatchFcn', @preprocessLabeledSequences,...
'MiniBatchFormat',{'SCTB',''});
% Plot setup (the figure/axes creation is not shown in this excerpt).
if plots == "training-progress"
lineLossTrain = animatedline('Color',[0.85 0.325 0.098]);
% ---- per-mini-batch training step (enclosing epoch/batch for-loops and
% their 'end's are not shown in this excerpt) ----
iteration = iteration + 1;
% Evaluate loss and gradients inside dlfeval so dlgradient can trace
% operations on the dlarray inputs.
[gradients,state,loss] = dlfeval(@modelGradients,dlnet,dlX,dlY);
% Time-based learning-rate decay schedule.
learnRate = initialLearnRate/(1 + decay*iteration);
% Adam update of the network learnables; averageGrad/averageSqGrad carry
% the moving moment estimates between iterations.
[dlnet,averageGrad,averageSqGrad] = adamupdate(dlnet,gradients,averageGrad,averageSqGrad, ...
iteration,learnRate,gradDecay,sqGradDecay);
% Live plot: append the current loss (gathered back to CPU as a double)
% against the iteration count.
if plots == "training-progress"
D = duration(0,0,toc(start),'Format','hh:mm:ss');
addpoints(lineLossTrain,iteration,double(gather(extractdata(loss))))
title("Epoch: " + epoch + " of " + numEpochs + ", Elapsed: " + string(D))
function [gradients,state,loss] = modelGradients(dlnet,dlX,Y)
% modelGradients  Compute loss, state, and learnable-parameter gradients
% for one mini-batch; intended to be called via dlfeval so that automatic
% differentiation can trace the forward pass.
%   dlnet - dlnetwork being trained
%   dlX   - formatted dlarray of predictors
%   Y     - targets for the cross-entropy loss
% Forward pass returns the network output and the updated network state.
[predictions,state] = forward(dlnet,dlX);
% Classification loss between predictions and targets.
loss = crossentropy(predictions,Y);
% Gradients of the loss with respect to every learnable parameter.
gradients = dlgradient(loss,dlnet.Learnables);