Ground Detection (Segmentation) for Autonomous Driving

close all;
% clear all;
clc;
% Load ResNet-50 once to verify that the pretrained-network support package is installed.
resnet50();
outputFolder = fullfile('/home/hyphen/Downloads','data_road_224');
imgDir = fullfile(outputFolder,'training','image_2');
imds = imageDatastore(imgDir);
%auimds = augmentedImageDatastore([375 1242 3],imds);
I = readimage(imds,1);
% histeq expects a 2-D image, so equalize each color channel separately.
for k = 1:size(I,3), I(:,:,k) = histeq(I(:,:,k)); end
imshow(I)
classNames = [
"rightroad"
"leftroad"
"environment"
];
labelIDs = kittiPixelLabelIDs();
labelDir = fullfile(outputFolder,'training','gt_image_2');
% imds1 = imageDatastore(labelDir);
% pxds1 = augmentedImageDatastore([375 1242 3],imds1);
%
% filename = '/tmp/data_road/training/gt1';
% pxds2 = imwrite(pxds1,filename);
pxds = pixelLabelDatastore(labelDir,classNames,labelIDs);
C = readimage(pxds,1);
cmap = kittiColorMap;
B = labeloverlay(I,C,'ColorMap',cmap);
imshow(B)
pixelLabelColorbar(cmap,classNames);
tbl = countEachLabel(pxds)
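% Optional sketch: visualize the per-class pixel distribution from countEachLabel
% to see how imbalanced the three classes are (uses only the "tbl" computed above).
frequency = tbl.PixelCount / sum(tbl.PixelCount);
figure
bar(1:numel(classNames), frequency)
xticks(1:numel(classNames))
xticklabels(tbl.Name)
xtickangle(45)
ylabel('Frequency')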
% Prepare Training, Validation, and Test Sets
[imdsTrain, imdsVal, imdsTest, pxdsTrain, pxdsVal, pxdsTest] = partitionkittiData(imds,pxds);
numTrainingImages = numel(imdsTrain.Files)
numValImages = numel(imdsVal.Files)
numTestingImages = numel(imdsTest.Files)
% Create the Network
imageSize = [224 224 3];
numClasses = numel(classNames)
lgraph = deeplabv3plusLayers(imageSize, numClasses, "resnet50");
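% Optional sanity check: list the layer names of the generated DeepLab v3+ graph
% to confirm the final pixel classification layer is named "classification"
% before it is replaced below (analyzeNetwork(lgraph) gives an interactive view).
disp({lgraph.Layers.Name}')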
% Balance Classes Using Class Weighting
% Median frequency balancing: weight each class by the median class frequency
% divided by its own frequency, so under-represented classes contribute more to the loss.
imageFreq = tbl.PixelCount ./ tbl.ImagePixelCount;
classWeights = median(imageFreq) ./ imageFreq
pxLayer = pixelClassificationLayer('Name','labels','Classes',tbl.Name,'ClassWeights',classWeights);
lgraph = replaceLayer(lgraph,"classification",pxLayer);
% Select Training Options
% Define validation data.
pximdsVal = pixelLabelImageDatastore(imdsVal,pxdsVal);
% Define training options.
options = trainingOptions('sgdm', ...
    'LearnRateSchedule','piecewise', ...
    'LearnRateDropPeriod',10, ...
    'LearnRateDropFactor',0.3, ...
    'Momentum',0.9, ...
    'InitialLearnRate',0.001, ...
    'L2Regularization',0.005, ...
    'ValidationData',pximdsVal, ...
    'MaxEpochs',50, ...
    'MiniBatchSize',1, ...
    'Shuffle','every-epoch', ...
    'CheckpointPath',tempdir, ...
    'VerboseFrequency',2, ...
    'Plots','training-progress', ...
    'ValidationPatience',Inf);
% Data Augmentation
augmenter = imageDataAugmenter('RandXReflection',true,'RandXTranslation',[-10 10],'RandYTranslation',[-10 10]);
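% Optional sketch: preview the augmenter on one training image. augment() draws a
% random transform each call, so the preview differs from run to run. The sample
% index below is an arbitrary choice for illustration.
sampleIdx = 1;
Iorig = readimage(imdsTrain, sampleIdx);
Iaug  = augment(augmenter, Iorig);
figure
imshowpair(Iorig, Iaug, 'montage')
title('Original (left) vs. randomly augmented (right)')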
% Start Training
pximds = pixelLabelImageDatastore(imdsTrain,pxdsTrain,'DataAugmentation',augmenter);
doTraining = true;
if doTraining
    [net, info] = trainNetwork(pximds,lgraph,options);
else
    % To reuse a previously trained model, set doTraining to false and point
    % pretrainedNetwork at a MAT-file that contains the trained network as "net".
    data = load(pretrainedNetwork);
    net = data.net;
end
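% Optional: save the trained network so the doTraining == false branch above can
% reload it later. The file name and location here are only examples.
if doTraining
    save(fullfile(tempdir,'trainedKittiRoadNet.mat'),'net','info');
end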
% Test Network on One Image
I1 = readimage(imdsTest,30);
% Apply the same per-channel histogram equalization used on the training images.
J = I1;
for k = 1:size(J,3), J(:,:,k) = histeq(J(:,:,k)); end
figure
imshow(I1)
C = semanticseg(J, net);
B = labeloverlay(J,C,'Colormap',cmap,'Transparency',0.3);
figure
imshow(B)
pixelLabelColorbar(cmap, classNames);
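% Optional sketch: evaluate the network on the whole test set instead of a single
% image (histogram equalization is skipped here for brevity). semanticseg writes
% the predicted label images to tempdir, and evaluateSemanticSegmentation reports
% accuracy and IoU per class.
pxdsResults = semanticseg(imdsTest, net, ...
    'MiniBatchSize',4, ...
    'WriteLocation',tempdir, ...
    'Verbose',false);
metrics = evaluateSemanticSegmentation(pxdsResults, pxdsTest, 'Verbose', false);
metrics.DataSetMetrics
metrics.ClassMetrics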
function labelIDs = kittiPixelLabelIDs()
% Return the pixel label IDs corresponding to each class.
%
% The KITTI road ground-truth images (gt_image_2) encode labels as RGB color
% values. Group them into the 3 classes used here ("rightroad", "leftroad",
% "environment") and return them as a cell array of M-by-3 matrices, one
% matrix per class.
labelIDs = { ...
% "rightroad"
[
255 000 255; ... % "rightroad"
]
% "leftroad"
[
000 000 000; ... % "leftroad"
]
% "environment"
[
255 000 000; ... % "environment"
]
};
end
function cmap = kittiColorMap()
% Define the colormap used for the KITTI road classes.
cmap = [
255 0 255 % rightroad
0 0 0 % leftroad
255 0 0 % environment
];
% Normalize between [0 1].
cmap = cmap ./ 255;
end
function pixelLabelColorbar(cmap, classNames)
% Add a colorbar to the current axis. The colorbar is formatted
% to display the class names with the color.
colormap(gca,cmap)
% Add colorbar to current figure.
c = colorbar('peer', gca);
% Use class names for tick marks.
c.TickLabels = classNames;
numClasses = size(cmap,1);
% Center tick labels.
c.Ticks = 1/(numClasses*2):1/numClasses:1;
% Remove tick mark.
c.TickLength = 0;
end
function [imdsTrain, imdsVal, imdsTest, pxdsTrain, pxdsVal, pxdsTest] = partitionkittiData(imds,pxds)
% Partition the KITTI road data by randomly selecting 60% of the data for
% training, 20% for validation, and the remaining 20% for testing.
% Set initial random state for example reproducibility.
rng(0);
numFiles = numel(imds.Files);
shuffledIndices = randperm(numFiles);
% Use 60% of the images for training.
numTrain = round(0.60 * numFiles);
trainingIdx = shuffledIndices(1:numTrain);
% Use 20% of the images for validation
numVal = round(0.20 * numFiles);
valIdx = shuffledIndices(numTrain+1:numTrain+numVal);
% Use the rest for testing.
testIdx = shuffledIndices(numTrain+numVal+1:end);
% Create image datastores for training, validation, and test.
trainingImages = imds.Files(trainingIdx);
valImages = imds.Files(valIdx);
testImages = imds.Files(testIdx);
imdsTrain = imageDatastore(trainingImages);
imdsVal = imageDatastore(valImages);
imdsTest = imageDatastore(testImages);
% Extract class and label IDs info.
classes = pxds.ClassNames;
labelIDs = kittiPixelLabelIDs();
% Create pixel label datastores for training, validation, and test.
trainingLabels = pxds.Files(trainingIdx);
valLabels = pxds.Files(valIdx);
testLabels = pxds.Files(testIdx);
pxdsTrain = pixelLabelDatastore(trainingLabels, classes, labelIDs);
pxdsVal = pixelLabelDatastore(valLabels, classes, labelIDs);
pxdsTest = pixelLabelDatastore(testLabels, classes, labelIDs);
end
