|
| 1 | +# -*- coding: utf-8 -*- |
| 2 | +""" |
| 3 | +MobileNet for ImageNet. |
| 4 | +""" |
| 5 | + |
| 6 | +import os |
| 7 | +# import numpy as np |
| 8 | +import tensorflow as tf |
| 9 | +from .. import _logging as logging |
| 10 | +from ..layers import (Layer, BatchNormLayer, Conv2d, DepthwiseConv2d, FlattenLayer, GlobalMeanPool2d, InputLayer, ReshapeLayer) |
| 11 | +from ..files import maybe_download_and_extract, assign_params, load_npz |
| 12 | + |
| 13 | +__all__ = [ |
| 14 | + 'MobileNetV1', |
| 15 | +] |
| 16 | + |
| 17 | + |
class MobileNetV1(Layer):
    """Pre-trained MobileNetV1 model for ImageNet.

    Parameters
    ------------
    x : placeholder
        shape [None, 224, 224, 3], value range [0, 1].
    end_with : str
        The end point of the model
        [conv, depth1, depth2 ... depth13, globalmeanpool, reshape, out].
        Default ``out`` i.e. the whole model.
    is_train : boolean
        Whether the model is used for training i.e. enable dropout.
    reuse : boolean
        Whether to reuse the model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_mobilenetv1.py>`__
    >>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
    >>> # get the whole model
    >>> net = tl.models.MobileNetV1(x)
    >>> # restore pre-trained parameters
    >>> sess = tf.InteractiveSession()
    >>> net.restore_params(sess)
    >>> # use for inferencing
    >>> probs = tf.nn.softmax(net.outputs)

    Extract features and Train a classifier with 100 classes
    >>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
    >>> # get model without the last layer
    >>> cnn = tl.models.MobileNetV1(x, end_with='reshape')
    >>> # add one more layer
    >>> net = Conv2d(cnn, 100, (1, 1), (1, 1), name='out')
    >>> net = FlattenLayer(net, name='flatten')
    >>> # initialize all parameters
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> # restore pre-trained parameters
    >>> cnn.restore_params(sess)
    >>> # train your own classifier (only update the last layer)
    >>> train_params = tl.layers.get_variables_with_name('out')

    Reuse model
    >>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
    >>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
    >>> # get model without the last layer
    >>> net1 = tl.models.MobileNetV1(x1, end_with='reshape')
    >>> # reuse the parameters with different input
    >>> net2 = tl.models.MobileNetV1(x2, end_with='reshape', reuse=True)
    >>> # restore pre-trained parameters (as they share parameters, we don't need to restore net2)
    >>> sess = tf.InteractiveSession()
    >>> net1.restore_params(sess)

    """

    # (n_filter, strides) for the 13 depthwise-separable blocks, in order
    # depth1 .. depth13 — the standard MobileNetV1 architecture table.
    _DEPTH_CONFIG = [
        (64, (1, 1)),
        (128, (2, 2)),
        (128, (1, 1)),
        (256, (2, 2)),
        (256, (1, 1)),
        (512, (2, 2)),
        (512, (1, 1)),
        (512, (1, 1)),
        (512, (1, 1)),
        (512, (1, 1)),
        (512, (1, 1)),
        (1024, (2, 2)),
        (1024, (1, 1)),
    ]

    def __init__(self, x, end_with='out', is_train=False, reuse=None):
        # Build the graph once, then mirror the inner network's attributes so
        # this wrapper object can be used like a regular TensorLayer layer.
        self.net = self.mobilenetv1(x, end_with, is_train, reuse)
        self.outputs = self.net.outputs
        self.all_params = self.net.all_params
        self.all_layers = self.net.all_layers
        self.all_drop = self.net.all_drop
        self.print_layers = self.net.print_layers
        self.print_params = self.net.print_params

    def mobilenetv1(self, x, end_with='out', is_train=False, reuse=None):
        """Build the MobileNetV1 graph up to (and including) layer ``end_with``.

        Parameters
        ----------
        x : placeholder
            Input tensor, shape [None, 224, 224, 3].
        end_with : str
            Name of the last layer to build (see class docstring).
        is_train : boolean
            Passed to the BatchNorm layers.
        reuse : boolean
            Whether to reuse variables in the ``mobilenetv1`` scope.

        Returns
        -------
        The TensorLayer network ending at ``end_with``.

        Raises
        ------
        ValueError
            If ``end_with`` matches no layer name.
        """
        with tf.variable_scope("mobilenetv1", reuse=reuse):
            n = InputLayer(x)
            n = self.conv_block(n, 32, strides=(2, 2), is_train=is_train, name="conv")
            # NOTE: end points are detected by substring match on the tensor
            # name (scope names appear in it), preserving the original,
            # order-dependent early-exit behavior.
            if end_with in n.outputs.name:
                return n

            for idx, (n_filter, strides) in enumerate(self._DEPTH_CONFIG, start=1):
                n = self.depthwise_conv_block(n, n_filter, strides=strides, is_train=is_train, name="depth%d" % idx)
                if end_with in n.outputs.name:
                    return n

            n = GlobalMeanPool2d(n, name='globalmeanpool')
            if end_with in n.outputs.name:
                return n
            # 1x1 conv head (equivalent to a Dense layer on the pooled features).
            n = ReshapeLayer(n, [-1, 1, 1, 1024], name='reshape')
            if end_with in n.outputs.name:
                return n
            n = Conv2d(n, 1000, (1, 1), (1, 1), name='out')
            n = FlattenLayer(n, name='flatten')
            if end_with == 'out':
                return n

        raise ValueError(
            "end_with must be one of: conv, depth1, depth2 ... depth13, globalmeanpool, reshape, out")

    @classmethod
    def conv_block(cls, n, n_filter, filter_size=(3, 3), strides=(1, 1), is_train=False, name='conv_block'):
        """Conv -> BatchNorm(relu6). No conv bias: BN provides the shift.

        ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py
        """
        with tf.variable_scope(name):
            n = Conv2d(n, n_filter, filter_size, strides, b_init=None, name='conv')
            n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm')
        return n

    @classmethod
    def depthwise_conv_block(cls, n, n_filter, strides=(1, 1), is_train=False, name="depth_block"):
        """Depthwise-separable block: 3x3 depthwise conv + 1x1 pointwise conv,
        each followed by BatchNorm with relu6 activation."""
        with tf.variable_scope(name):
            n = DepthwiseConv2d(n, (3, 3), strides, b_init=None, name='depthwise')
            n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm1')
            n = Conv2d(n, n_filter, (1, 1), (1, 1), b_init=None, name='conv')
            n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm2')
        return n

    def restore_params(self, sess, path='models'):
        """Download (if needed) the pre-trained npz and assign its weights.

        Only the first ``len(self.net.all_params)`` entries are assigned, so a
        truncated model (``end_with`` before ``out``) restores cleanly.
        """
        logging.info("Restore pre-trained parameters")
        maybe_download_and_extract(
            'mobilenet.npz', path, 'https://github.com/tensorlayer/pretrained-models/raw/master/models/',
            expected_bytes=25600116)  # ls -al
        params = load_npz(name=os.path.join(path, 'mobilenet.npz'))
        assign_params(sess, params[:len(self.net.all_params)], self.net)
        del params
0 commit comments