3

I am new to Keras and TensorFlow. I am working on a face recognition project using deep learning. With the code below I get the class label of a subject as output (from the softmax layer), and the accuracy is 97.5% on my custom dataset of 100 classes.

But now I'm interested in the feature vector representation, so I want to pass the test images through the network and extract the output of the activated dense layer just before the softmax (the last layer). I referred to the Keras documentation, but nothing seemed to work for me. Can anyone help me extract the output of that dense layer activation and save it as a NumPy array? Thanks in advance.

import os
import argparse

import numpy as np
import cv2
from PIL import Image
from sklearn.model_selection import train_test_split

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense
from keras.optimizers import SGD
from keras.utils import np_utils


class Faces:
    @staticmethod
    def build(width, height, depth, classes, weightsPath=None):
        # initialize the model
        model = Sequential()
        model.add(Conv2D(100, (5, 5), padding="same", input_shape=(depth, height, width), data_format="channels_first"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_first"))

        model.add(Conv2D(100, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_first"))

        # 3 set of CONV => RELU => POOL
        model.add(Conv2D(100, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_first"))

        # 4 set of CONV => RELU => POOL
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_first"))

        # 5 set of CONV => RELU => POOL
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_first"))

        # 6 set of CONV => RELU => POOL
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_first"))

        # set of FC => RELU layers
        model.add(Flatten())
        #model.add(Dense(classes))
        #model.add(Activation("relu"))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # if a pre-trained weights path was supplied, load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model

ap = argparse.ArgumentParser()
ap.add_argument("-s", "--save-model", type=int, default=-1,
    help="(optional) whether or not model weights should be saved")
ap.add_argument("-l", "--load-model", type=int, default=-1,
    help="(optional) whether or not pre-trained model should be loaded")
ap.add_argument("-w", "--weights", type=str,
    help="(optional) path to weights file")
args = vars(ap.parse_args())


path = 'C:\\Users\\Project\\FaceGallery'
image_paths = [os.path.join(path, f) for f in os.listdir(path)]
images = []
labels = []
name_map = {}
demo = {}
nbr = 0
j = 0
for image_path in image_paths:
    image_pil = Image.open(image_path).convert('L')
    image = np.array(image_pil, 'uint8')
    cv2.imshow("Image",image)
    cv2.waitKey(5)
    name = image_path.split("\\")[4][0:5]
    print(name)
    # Get the label of the image
    if name not in demo:
        demo[name] = j
        j = j + 1
    nbr = demo[name]

    name_map[nbr] = name
    images.append(image)
    labels.append(nbr)
print(name_map)
# Training and testing data split ratio = 60:40
(trainData, testData, trainLabels, testLabels) = train_test_split(images, labels, test_size=0.4)

trainLabels = np_utils.to_categorical(trainLabels, 100)
testLabels = np_utils.to_categorical(testLabels, 100)

trainData = np.asarray(trainData)
testData = np.asarray(testData)

trainData = trainData[:, np.newaxis, :, :] / 255.0
testData = testData[:, np.newaxis, :, :] / 255.0

opt = SGD(lr=0.01)
model = Faces.build(width=200, height=200, depth=1, classes=100,
                    weightsPath=args["weights"] if args["load_model"] > 0 else None)

model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
if args["load_model"] < 0:
    model.fit(trainData, trainLabels, batch_size=10, epochs=300)
(loss, accuracy) = model.evaluate(testData, testLabels, batch_size=100, verbose=1)
print("Accuracy: {:.2f}%".format(accuracy * 100))
if args["save_model"] > 0:
    model.save_weights(args["weights"], overwrite=True)

for i in np.arange(0, len(testLabels)):
    probs = model.predict(testData[np.newaxis, i])
    prediction = probs.argmax(axis=1)
    image = (testData[i][0] * 255).astype("uint8")
    name = "Subject " + str(prediction[0])
    if prediction[0] in name_map:
        name = name_map[prediction[0]]
    cv2.putText(image, name, (5, 20), cv2.FONT_HERSHEY_PLAIN, 1.3, (255, 255, 255), 2)
    print("Predicted: {}, Actual: {}".format(prediction[0], np.argmax(testLabels[i])))
    cv2.imshow("Testing Face", image)
    cv2.waitKey(1000)

2 Answers

1

See "How can I obtain the output of an intermediate layer?" in the Keras FAQ: https://keras.io/getting-started/faq/

You'll need to name the layer you want the output from by adding a name argument to its definition, e.g.:

model.add(Dense(xx, name='my_dense'))

You can then define an intermediate model and run it by doing something like:

m2 = Model(inputs=model.input, outputs=model.get_layer('my_dense').output)
Y = m2.predict(X)
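
Applied to the model in the question, a minimal sketch might look like the following. It assumes you re-enable the commented-out Dense => RELU block before the softmax classifier and give that Dense layer a name ('feature_dense' and the 512-unit size are arbitrary choices here), and that model and testData are the trained model and preprocessed test images from your script:

from keras.models import Model
import numpy as np

# inside Faces.build(), just before the softmax classifier:
# model.add(Dense(512, name="feature_dense"))
# model.add(Activation("relu"))

# build a second model that shares the trained weights but stops at the named layer
feature_model = Model(inputs=model.input,
                      outputs=model.get_layer("feature_dense").output)

# predict() runs the forward pass and returns a plain NumPy array,
# one feature vector per test image
features = feature_model.predict(testData, batch_size=10)
print(features.shape)

# save the feature vectors to disk ("test_features.npy" is a hypothetical path)
np.save("test_features.npy", features)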

1 Comment

I got this output: Tensor("dense_1/BiasAdd:0", shape=(?, 442), dtype=float32). But I need to print a NumPy array that is a feature representation of the input image.
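
That Tensor(...) printout suggests the symbolic layer output was printed rather than the result of actually running the intermediate model; only predict() evaluates the graph and returns a NumPy array. A minimal sketch, assuming m2 is the intermediate model from the answer above and X is a preprocessed batch of test images:

# printing the layer's output attribute only shows the symbolic tensor
print(model.get_layer('my_dense').output)   # Tensor("dense_1/BiasAdd:0", ...)

# running the intermediate model returns actual values as a NumPy array
Y = m2.predict(X)
print(type(Y), Y.shape)                     # <class 'numpy.ndarray'>, (num_images, 442)
np.save('features.npy', Y)                  # 'features.npy' is a hypothetical file name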
0

You can call .numpy() on the output tensor to get a NumPy array if you use TensorFlow 2 as the backend for your model. See the TensorFlow documentation on eager tensors for more details.
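
For example, with TensorFlow 2 and eager execution (the default), something along these lines should work; 'my_dense' is the layer name from the first answer, and test_batch is assumed to be a preprocessed batch of images:

import tensorflow as tf
import numpy as np

# intermediate model that outputs the named dense layer
feature_model = tf.keras.Model(inputs=model.input,
                               outputs=model.get_layer('my_dense').output)

features = feature_model(test_batch)   # returns an EagerTensor in TF2
features_np = features.numpy()         # convert the EagerTensor to a NumPy array
np.save('features.npy', features_np)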

