The most practical way to solve the problem is to split the data coming out of the sequence input layer by means of two fullyConnectedLayer objects with custom weights and with both WeightLearnRateFactor and BiasLearnRateFactor set to zero, so that the splitting weights stay frozen during training. An example follows:
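The snippet assumes numInput and numOutput are already defined by the data; for a self-contained run, placeholder values can be used (these are illustrative only, not from the original problem):

numInput = 10;   % example: 10 input channels, with the 6th routed to its own branch
numOutput = 1;   % example: single regression response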
inputLay = sequenceInputLayer(numInput,'Normalization','zscore','NormalizationDimension','channel','Name','input Layer');
fullyConn1 = fullyConnectedLayer(numInput-1,'Name','Fully Conn 1');
fullyConn2 = fullyConnectedLayer(1,'Name','Fully Conn 2');
lstm1 = lstmLayer(64,'OutputMode','sequence','Name','LSTM Layer 1');
fullyConn3 = fullyConnectedLayer(32,'Name','Fully Conn 3');
fullyConnOut = fullyConnectedLayer(numOutput,'Name','Fully Conn Out');
outputLay = regressionLayer('Name','Output Layer');
blstm1 = bilstmLayer(8,'OutputMode','sequence','Name','BiLSTM Layer 1');
concat = concatenationLayer(1,2,'Name','concat');
relu1 = reluLayer('Name','ReLU 1');
relu2 = reluLayer('Name','ReLU 2');
% Weights for the fully connected layers that split off the 6th input channel
% fullyConn1 is a frozen selector that passes every channel except the 6th:
% keep the first five columns of a (numInput-1)-by-numInput identity, insert
% a zero column in position 6, then let the remaining diagonal follow, so
% channel i maps to output i for i<6 and to output i-1 for i>6
weights1=eye(numInput-1,numInput);
zerocolumn=zeros(numInput-1,1);
weights1=[weights1(:,1:5), zerocolumn, weights1(:,6:end-1)];
fullyConn1.Weights=weights1;
fullyConn1.WeightLearnRateFactor=0;   % freeze weights so the split is not trained away
fullyConn1.BiasLearnRateFactor=0;     % bias initializes to zeros and stays frozen
% fullyConn2 is a frozen selector that passes only the 6th channel
weights2=zeros(1,numInput);
weights2(6)=1;
fullyConn2.Weights=weights2;
fullyConn2.WeightLearnRateFactor=0;
fullyConn2.BiasLearnRateFactor=0;
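As a quick sanity check (not part of the original answer), the two selector matrices can be applied to a dummy channel vector to confirm the routing:

x = (1:numInput)';    % dummy vector with channel i holding the value i
disp((weights1*x)')   % 1 2 3 4 5 7 ... numInput  (6th channel removed)
disp(weights2*x)      % 6                         (6th channel alone)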
% Architecture: two parallel branches whose outputs are concatenated
branch1=[            % processes all channels except the 6th
fullyConn1
lstm1
relu1
];
branch2=[            % processes only the 6th channel
fullyConn2
blstm1
relu2
];
outlayers=[          % merges the branches and produces the regression output
concat
fullyConn3
fullyConnOut
outputLay
];
lgraph = layerGraph(branch1);
lgraph = addLayers(lgraph,branch2);    % layers in an array are connected in series
lgraph = addLayers(lgraph,outlayers);
lgraph = addLayers(lgraph,inputLay);
lgraph = connectLayers(lgraph,'input Layer','Fully Conn 1');
lgraph = connectLayers(lgraph,'input Layer','Fully Conn 2');
lgraph = connectLayers(lgraph,'ReLU 1','concat/in1');
lgraph = connectLayers(lgraph,'ReLU 2','concat/in2');
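With the graph assembled, it can be inspected and trained as usual. The following is a minimal sketch, assuming sequence training data XTrain/YTrain and illustrative option values (none of these come from the original answer):

analyzeNetwork(lgraph)   % or plot(lgraph) to visualize the two branches
options = trainingOptions('adam', ...
    'MaxEpochs',50, ...
    'Shuffle','every-epoch', ...
    'Plots','training-progress');
net = trainNetwork(XTrain,YTrain,lgraph,options);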