resnet_16s

from models.network import NetWork
import tensorflow as tf

class ResNet38(NetWork):
def setup(self, is_training, num_classes):
    pass

class ResNet50(NetWork):
    pass

class ResNet101(NetWork):
    '''Network definition.

    Args:
      is_training: whether to update the running mean and variance of the batch normalisation layers.
                   If the batch size is small, it is better to keep the running mean and variance of
                   the pre-trained model frozen.
      num_classes: number of classes to predict (including background).
    '''

def setup(self, is_training, num_classes):
    (self.feed('data')
     .conv([7, 7], 64, [2, 2], biased=False, relu=False, name='conv1')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn_conv1')
     .max_pool([3, 3], [2, 2], name='pool1')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res2a_branch1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch1'))

    (self.feed('pool1')
     .conv([1, 1], 64, [1, 1], biased=False, relu=False, name='res2a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2a')
     .conv([3, 3], 64, [1, 1], biased=False, relu=False, name='res2a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2b')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res2a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch2c'))

    (self.feed('bn2a_branch1',
               'bn2a_branch2c')  # output_stride = 4
     .add(name='res2a')
     .relu(name='res2a_relu')
     .conv([1, 1], 64, [1, 1], biased=False, relu=False, name='res2b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2a')
     .conv([3, 3], 64, [1, 1], biased=False, relu=False, name='res2b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2b')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res2b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn2b_branch2c'))

    (self.feed('res2a_relu',
               'bn2b_branch2c')
     .add(name='res2b')
     .relu(name='res2b_relu')
     .conv([1, 1], 64, [1, 1], biased=False, relu=False, name='res2c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2a')
     .conv([3, 3], 64, [1, 1], biased=False, relu=False, name='res2c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2b')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res2c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn2c_branch2c'))

    (self.feed('res2b_relu',
               'bn2c_branch2c')
     .add(name='res2c')
     .relu(name='res2c_relu')
     .conv([1, 1], 512, [2, 2], biased=False, relu=False, name='res3a_branch1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch1'))

    (self.feed('res2c_relu')
     .conv([1, 1], 128, [2, 2], biased=False, relu=False, name='res3a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2a')
     .conv([3, 3], 128, [1, 1], biased=False, relu=False, name='res3a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2b')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res3a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch2c'))

    (self.feed('bn3a_branch1',
               'bn3a_branch2c')  # output_stride = 8
     .add(name='res3a')
     .relu(name='res3a_relu')
     .conv([1, 1], 128, [1, 1], biased=False, relu=False, name='res3b1_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2a')
     .conv([3, 3], 128, [1, 1], biased=False, relu=False, name='res3b1_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2b')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res3b1_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn3b1_branch2c'))

    (self.feed('res3a_relu',
               'bn3b1_branch2c')
     .add(name='res3b1')
     .relu(name='res3b1_relu')
     .conv([1, 1], 128, [1, 1], biased=False, relu=False, name='res3b2_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2a')
     .conv([3, 3], 128, [1, 1], biased=False, relu=False, name='res3b2_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2b')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res3b2_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn3b2_branch2c'))

    (self.feed('res3b1_relu',
               'bn3b2_branch2c')
     .add(name='res3b2')
     .relu(name='res3b2_relu')
     .conv([1, 1], 128, [1, 1], biased=False, relu=False, name='res3b3_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2a')
     .conv([3, 3], 128, [1, 1], biased=False, relu=False, name='res3b3_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2b')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res3b3_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn3b3_branch2c'))

    (self.feed('res3b2_relu',
               'bn3b3_branch2c')
     .add(name='res3b3')
     .relu(name='res3b3_relu')
     .conv([1, 1], 1024, [2, 2], biased=False, relu=False, name='res4a_branch1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch1'))

    (self.feed('res3b3_relu')
     .conv([1, 1], 256, [2, 2], biased=False, relu=False, name='res4a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4a_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch2c'))

    (self.feed('bn4a_branch1',
               'bn4a_branch2c')  # output_stride = 16
     .add(name='res4a')
     .relu(name='res4a_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b1_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b1_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b1_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b1_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b1_branch2c'))

    (self.feed('res4a_relu',
               'bn4b1_branch2c')
     .add(name='res4b1')
     .relu(name='res4b1_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b2_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b2_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b2_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b2_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b2_branch2c'))

    (self.feed('res4b1_relu',
               'bn4b2_branch2c')
     .add(name='res4b2')
     .relu(name='res4b2_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b3_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b3_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b3_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b3_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b3_branch2c'))

    (self.feed('res4b2_relu',
               'bn4b3_branch2c')
     .add(name='res4b3')
     .relu(name='res4b3_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b4_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b4_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b4_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b4_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b4_branch2c'))

    (self.feed('res4b3_relu',
               'bn4b4_branch2c')
     .add(name='res4b4')
     .relu(name='res4b4_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b5_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b5_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b5_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b5_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b5_branch2c'))

    (self.feed('res4b4_relu',
               'bn4b5_branch2c')
     .add(name='res4b5')
     .relu(name='res4b5_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b6_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b6_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b6_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b6_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b6_branch2c'))

    (self.feed('res4b5_relu',
               'bn4b6_branch2c')
     .add(name='res4b6')
     .relu(name='res4b6_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b7_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b7_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b7_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b7_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b7_branch2c'))

    (self.feed('res4b6_relu',
               'bn4b7_branch2c')
     .add(name='res4b7')
     .relu(name='res4b7_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b8_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b8_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b8_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b8_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b8_branch2c'))

    (self.feed('res4b7_relu',
               'bn4b8_branch2c')
     .add(name='res4b8')
     .relu(name='res4b8_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b9_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b9_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b9_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b9_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b9_branch2c'))

    (self.feed('res4b8_relu',
               'bn4b9_branch2c')
     .add(name='res4b9')
     .relu(name='res4b9_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b10_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b10_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b10_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b10_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b10_branch2c'))

    (self.feed('res4b9_relu',
               'bn4b10_branch2c')
     .add(name='res4b10')
     .relu(name='res4b10_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b11_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b11_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b11_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b11_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b11_branch2c'))

    (self.feed('res4b10_relu',
               'bn4b11_branch2c')
     .add(name='res4b11')
     .relu(name='res4b11_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b12_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b12_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b12_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b12_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b12_branch2c'))

    (self.feed('res4b11_relu',
               'bn4b12_branch2c')
     .add(name='res4b12')
     .relu(name='res4b12_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b13_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b13_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b13_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b13_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b13_branch2c'))

    (self.feed('res4b12_relu',
               'bn4b13_branch2c')
     .add(name='res4b13')
     .relu(name='res4b13_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b14_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b14_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b14_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b14_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b14_branch2c'))

    (self.feed('res4b13_relu',
               'bn4b14_branch2c')
     .add(name='res4b14')
     .relu(name='res4b14_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b15_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b15_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b15_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b15_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b15_branch2c'))

    (self.feed('res4b14_relu',
               'bn4b15_branch2c')
     .add(name='res4b15')
     .relu(name='res4b15_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b16_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b16_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b16_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b16_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b16_branch2c'))

    (self.feed('res4b15_relu',
               'bn4b16_branch2c')
     .add(name='res4b16')
     .relu(name='res4b16_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b17_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b17_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b17_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b17_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b17_branch2c'))

    (self.feed('res4b16_relu',
               'bn4b17_branch2c')
     .add(name='res4b17')
     .relu(name='res4b17_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b18_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b18_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b18_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b18_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b18_branch2c'))

    (self.feed('res4b17_relu',
               'bn4b18_branch2c')
     .add(name='res4b18')
     .relu(name='res4b18_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b19_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b19_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b19_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b19_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b19_branch2c'))

    (self.feed('res4b18_relu',
               'bn4b19_branch2c')
     .add(name='res4b19')
     .relu(name='res4b19_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b20_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b20_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b20_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b20_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b20_branch2c'))

    (self.feed('res4b19_relu',
               'bn4b20_branch2c')
     .add(name='res4b20')
     .relu(name='res4b20_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b21_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b21_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b21_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b21_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b21_branch2c'))

    (self.feed('res4b20_relu',
               'bn4b21_branch2c')
     .add(name='res4b21')
     .relu(name='res4b21_relu')
     .conv([1, 1], 256, [1, 1], biased=False, relu=False, name='res4b22_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2a')
     .conv([3, 3], 256, [1, 1], biased=False, relu=False, name='res4b22_branch2b')
     #.atrous_conv([3, 3], 256, 2, padding='SAME', biased=False, relu=False, name='res4b22_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2b')
     .conv([1, 1], 1024, [1, 1], biased=False, relu=False, name='res4b22_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn4b22_branch2c'))

    (self.feed('res4b21_relu',
               'bn4b22_branch2c')
     .add(name='res4b22')
     .relu(name='res4b22_relu')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='res5a_branch1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch1'))

    (self.feed('res4b22_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res5a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2a')
     .atrous_conv([3, 3], 512, 2, padding='SAME', biased=False, relu=False, name='res5a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='res5a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch2c'))

    (self.feed('bn5a_branch1',
               'bn5a_branch2c')  # output_stride = 16
     .add(name='res5a')
     .relu(name='res5a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res5b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2a')
     .atrous_conv([3, 3], 512, 4, padding='SAME', biased=False, relu=False, name='res5b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='res5b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn5b_branch2c'))

    (self.feed('res5a_relu',
               'bn5b_branch2c')
     .add(name='res5b')
     .relu(name='res5b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='res5c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5c_branch2a')
     .atrous_conv([3, 3], 512, 8, padding='SAME', biased=False, relu=False, name='res5c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='res5c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='bn5c_branch2c'))

    (self.feed('res5b_relu',
               'bn5c_branch2c')
     .add(name='res5c')
     .relu(name='res5c_relu'))
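For reference, a minimal sketch of how this backbone is instantiated, mirroring the usage in train.py below (the placeholder shape is an assumption, chosen to match the 513-pixel crops mentioned in the training script):

    # Usage sketch: the 'data' key matches the input fed in setup() above.
    image_batch = tf.placeholder(tf.float32, [None, 513, 513, 3], name='data')
    net = ResNet101(inputs={'data': image_batch}, is_training=False, num_classes=21)
    features = net.terminals[-1]  # 'res5c_relu', output_stride = 16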

deeplabv3_s16

# -*- coding: utf-8 -*-

import tensorflow as tf

from models.network import NetWork

class DeepLabV2(NetWork):
def setup(self, is_training, num_classes):
    inputs = self.inputs.popitem()[0]
    assert isinstance(inputs, str)
    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 6, padding='SAME', relu=False, name='fc1_voc12_c0'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 12, padding='SAME', relu=False, name='fc1_voc12_c1'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 18, padding='SAME', relu=False, name='fc1_voc12_c2'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 24, padding='SAME', relu=False, name='fc1_voc12_c3'))

    (self.feed('fc1_voc12_c0',
               'fc1_voc12_c1',
               'fc1_voc12_c2',
               'fc1_voc12_c3')
     .add(name='fc1_voc12'))

def topredict(self, raw_output, origin_shape):
    raw_output = tf.image.resize_bilinear(raw_output, origin_shape)
    raw_output = tf.argmax(raw_output, axis=3)
    prediction = tf.expand_dims(raw_output, axis=3)
    return prediction

class DeepLabV3(NetWork):
def setup(self, is_training, num_classes):
    inputs = self.inputs.popitem()[0]
    # assert isinstance(inputs, str)

    (self.feed(inputs)
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6a_branch2a')
     .atrous_conv([3, 3], 512, 4, padding='SAME', biased=False, relu=False, name='fc_res6a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6a_branch2c'))

    (self.feed(inputs,
               'fc_bn6a_branch2c')
     .add(name='fc_res6a')
     .relu(name='fc_res6a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6b_branch2a')
     .atrous_conv([3, 3], 512, 8, padding='SAME', biased=False, relu=False, name='fc_res6b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6b_branch2c'))

    (self.feed('fc_res6a_relu',
               'fc_bn6b_branch2c')
     .add(name='fc_res6b')
     .relu(name='fc_res6b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6c_branch2a')
     .atrous_conv([3, 3], 512, 16, padding='SAME', biased=False, relu=False, name='fc_res6c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6c_branch2c'))

    (self.feed('fc_res6b_relu',
               'fc_bn6c_branch2c')
     .add(name='fc_res6c')
     .relu(name='fc_res6c_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7a_branch2a')
     .atrous_conv([3, 3], 512, 8, padding='SAME', biased=False, relu=False, name='fc_res7a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7a_branch2c'))

    (self.feed('fc_res6c_relu',
               'fc_bn7a_branch2c')
     .add(name='fc_res7a')
     .relu(name='fc_res7a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7b_branch2a')
     .atrous_conv([3, 3], 512, 16, padding='SAME', biased=False, relu=False, name='fc_res7b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7b_branch2c'))

    (self.feed('fc_res7a_relu',
               'fc_bn7b_branch2c')
     .add(name='fc_res7b')
     .relu(name='fc_res7b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7c_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res7c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7c_branch2c'))

    (self.feed('fc_res7b_relu',
               'fc_bn7c_branch2c')
     .add(name='fc_res7c')
     .relu(name='fc_res7c_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8a_branch2a')
     .atrous_conv([3, 3], 512, 16, padding='SAME', biased=False, relu=False, name='fc_res8a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8a_branch2c'))

    (self.feed('fc_res7c_relu',
               'fc_bn8a_branch2c')
     .add(name='fc_res8a')
     .relu(name='fc_res8a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8b_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res8b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8b_branch2c'))

    (self.feed('fc_res8a_relu',
               'fc_bn8b_branch2c')
     .add(name='fc_res8b')
     .relu(name='fc_res8b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8c_branch2a')
     .atrous_conv([3, 3], 512, 64, padding='SAME', biased=False, relu=False, name='fc_res8c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8c_branch2c'))

    (self.feed('fc_res8b_relu',
               'fc_bn8c_branch2c')
     .add(name='fc_res8c')
     .relu(name='fc_res8c_relu'))

    (self.feed('fc_res8c_relu')
     .atrous_conv([3, 3], 256, 6, padding='SAME', relu=False, name='fc1_voc12_c0')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn0'))

    (self.feed('fc_res8c_relu')
     .atrous_conv([3, 3], 256, 12, padding='SAME', relu=False, name='fc1_voc12_c1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn1'))

    (self.feed('fc_res8c_relu')
     .atrous_conv([3, 3], 256, 18, padding='SAME', relu=False, name='fc1_voc12_c2')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn2'))

    (self.feed('fc_res8c_relu')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc1_voc12_c3')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn3'))

    layer = self.get_appointed_layer('fc_res8c_relu')
    new_shape = tf.shape(layer)[1:3]
    (self.feed('fc_res8c_relu')
     .global_average_pooling(name='fc1_voc12_mp0')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc1_voc12_c4')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn4')
     .resize(new_shape, name='fc1_voc12_bu0'))

    (self.feed('fc1_voc12_bn0',
               'fc1_voc12_bn1',
               'fc1_voc12_bn2',
               'fc1_voc12_bn3',
               'fc1_voc12_bu0')
     .concat(axis=3, name='fc1_voc12'))

    (self.feed('fc1_voc12')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc2_voc12_c0')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc2_voc12_bn0')
     .conv([1, 1], num_classes, [1, 1], relu=False, name='fc2_voc12_c1'))

def topredict(self, raw_output, origin_shape):
    raw_output = tf.image.resize_bilinear(raw_output, origin_shape)
    raw_output = tf.argmax(raw_output, axis=3)
    prediction = tf.expand_dims(raw_output, axis=3)
    return prediction
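Putting the pieces together, a hedged sketch of stacking this head on the ResNet-101 backbone, mirroring choose_model() in train.py below:

    # The backbone's last tensor becomes the single input of the head.
    backbone = ResNet101(inputs={'data': image_batch}).terminals[-1]
    model = DeepLabV3(inputs={backbone.op.name: backbone})
    raw_output = model.get_output()  # [N, h/16, w/16, num_classes]
    prediction = model.topredict(raw_output, tf.shape(image_batch)[1:3])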

deeplab_v3_s8

# -*- coding: utf-8 -*-

import tensorflow as tf

from models.network import NetWork

class DeepLabV2(NetWork):
def setup(self, is_training, num_classes):
    inputs = self.inputs.popitem()[0]
    assert isinstance(inputs, str)
    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 6, padding='SAME', relu=False, name='fc1_voc12_c0'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 12, padding='SAME', relu=False, name='fc1_voc12_c1'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 18, padding='SAME', relu=False, name='fc1_voc12_c2'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 24, padding='SAME', relu=False, name='fc1_voc12_c3'))

    (self.feed('fc1_voc12_c0',
               'fc1_voc12_c1',
               'fc1_voc12_c2',
               'fc1_voc12_c3')
     .add(name='fc1_voc12'))

def topredict(self, raw_output, origin_shape):
    raw_output = tf.image.resize_bilinear(raw_output, origin_shape)
    raw_output = tf.argmax(raw_output, axis=3)
    prediction = tf.expand_dims(raw_output, axis=3)
    return prediction

class DeepLabV3(NetWork):
def setup(self, is_training, num_classes):
    inputs = self.inputs.popitem()[0]
    assert isinstance(inputs, str)

    (self.feed(inputs)
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6a_branch2a')
     .atrous_conv([3, 3], 512, 8, padding='SAME', biased=False, relu=False, name='fc_res6a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6a_branch2c'))

    (self.feed(inputs,
               'fc_bn6a_branch2c')
     .add(name='fc_res6a')
     .relu(name='fc_res6a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6b_branch2a')
     .atrous_conv([3, 3], 512, 16, padding='SAME', biased=False, relu=False, name='fc_res6b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6b_branch2c'))

    (self.feed('fc_res6a_relu',
               'fc_bn6b_branch2c')
     .add(name='fc_res6b')
     .relu(name='fc_res6b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6c_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res6c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6c_branch2c'))

    (self.feed('fc_res6b_relu',
               'fc_bn6c_branch2c')
     .add(name='fc_res6c')
     .relu(name='fc_res6c_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7a_branch2a')
     .atrous_conv([3, 3], 512, 16, padding='SAME', biased=False, relu=False, name='fc_res7a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7a_branch2c'))

    (self.feed('fc_res6c_relu',
               'fc_bn7a_branch2c')
     .add(name='fc_res7a')
     .relu(name='fc_res7a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7b_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res7b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7b_branch2c'))

    (self.feed('fc_res7a_relu',
               'fc_bn7b_branch2c')
     .add(name='fc_res7b')
     .relu(name='fc_res7b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7c_branch2a')
     .atrous_conv([3, 3], 512, 64, padding='SAME', biased=False, relu=False, name='fc_res7c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7c_branch2c'))

    (self.feed('fc_res7b_relu',
               'fc_bn7c_branch2c')
     .add(name='fc_res7c')
     .relu(name='fc_res7c_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8a_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res8a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8a_branch2c'))

    (self.feed('fc_res7c_relu',
               'fc_bn8a_branch2c')
     .add(name='fc_res8a')
     .relu(name='fc_res8a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8b_branch2a')
     .atrous_conv([3, 3], 512, 64, padding='SAME', biased=False, relu=False, name='fc_res8b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8b_branch2c'))

    (self.feed('fc_res8a_relu',
               'fc_bn8b_branch2c')
     .add(name='fc_res8b')
     .relu(name='fc_res8b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8c_branch2a')
     .atrous_conv([3, 3], 512, 128, padding='SAME', biased=False, relu=False, name='fc_res8c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8c_branch2c'))

    (self.feed('fc_res8b_relu',
               'fc_bn8c_branch2c')
     .add(name='fc_res8c')
     .relu(name='fc_res8c_relu'))

    (self.feed('fc_res8c_relu')
     .atrous_conv([3, 3], 256, 12, padding='SAME', relu=False, name='fc1_voc12_c0')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn0'))

    (self.feed('fc_res8c_relu')
     .atrous_conv([3, 3], 256, 24, padding='SAME', relu=False, name='fc1_voc12_c1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn1'))

    (self.feed('fc_res8c_relu')
     .atrous_conv([3, 3], 256, 36, padding='SAME', relu=False, name='fc1_voc12_c2')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn2'))

    (self.feed('fc_res8c_relu')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc1_voc12_c3')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn3'))

    layer = self.get_appointed_layer('fc_res8c_relu')
    new_shape = tf.shape(layer)[1:3]
    (self.feed('fc_res8c_relu')
     .global_average_pooling(name='fc1_voc12_mp0')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc1_voc12_c4')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn4')
     .resize(new_shape, name='fc1_voc12_bu0'))

    (self.feed('fc1_voc12_bn0',
               'fc1_voc12_bn1',
               'fc1_voc12_bn2',
               'fc1_voc12_bn3',
               'fc1_voc12_bu0')
     .concat(axis=3, name='fc1_voc12'))

    (self.feed('fc1_voc12')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc2_voc12_c0')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc2_voc12_bn0')
     .conv([1, 1], num_classes, [1, 1], relu=False, name='fc2_voc12_c1'))

def topredict(self, raw_output, origin_shape):
    raw_output = tf.image.resize_bilinear(raw_output, origin_shape)
    raw_output = tf.argmax(raw_output, axis=3)
    prediction = tf.expand_dims(raw_output, axis=3)
    return prediction

networks

# -*- coding: utf-8 -*-

import numpy as np
import tensorflow as tf

slim = tf.contrib.slim

def layer(op):
    '''Decorator for composable network layers.'''

    def layer_decorated(self, *args, **kwargs):
        # Automatically set a name if not provided.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # Figure out the layer inputs.
        if len(self.terminals) == 0:
            raise RuntimeError('No input variables found for layer %s.' % name)
        elif len(self.terminals) == 1:
            layer_input = self.terminals[0]
        else:
            layer_input = list(self.terminals)
        # Perform the operation and get the output.
        layer_output = op(self, layer_input, *args, **kwargs)
        # Add to layer LUT.
        self.layers[name] = layer_output
        # This output is now the input for the next layer.
        self.feed(layer_output)
        # Return self for chained calls.
        return self

    return layer_decorated
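To make the mechanics concrete: every decorated op takes its input from self.terminals, registers its output in self.layers under `name`, re-feeds that output, and returns self, which is what makes the chains in the model files possible. A toy chain (names are for illustration only):

    (self.feed('data')
     .conv([3, 3], 64, [1, 1], name='conv1')  # registers self.layers['conv1']
     .relu(name='relu1'))                     # consumes conv1's output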

class NetWork(object):
def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):
    # The input nodes for this network
    self.inputs = inputs
    # The current list of terminal nodes
    self.terminals = []
    # Mapping from layer names to layers
    self.layers = dict(inputs)
    # If true, the resulting variables are set as trainable
    self.trainable = trainable
    # Switch variable for dropout
    self.use_dropout = tf.placeholder_with_default(tf.constant(0.8, dtype=tf.float32),
                                                   shape=[], name='use_dropout')
    self.setup(is_training, num_classes)

def setup(self, *args):
    '''Construct the network. '''
    raise NotImplementedError('Must be implemented by the subclass.')

def load(self, data_path, session, ignore_missing=False):
    '''Load network weights.
    data_path: The path to the numpy-serialized network weights
    session: The current TensorFlow session
    ignore_missing: If true, serialized weights for missing layers are ignored.
    '''
    data_dict = np.load(data_path, allow_pickle=True).item()
    for op_name in data_dict:
        with tf.variable_scope(op_name, reuse=True):
            for param_name, data in data_dict[op_name].items():
                try:
                    var = tf.get_variable(param_name)
                    session.run(var.assign(data))
                except ValueError:
                    if not ignore_missing:
                        raise

def feed(self, *args):
    '''Set the input(s) for the next operation by replacing the terminal nodes.
    The arguments can be either layer names or the actual layers.
    '''
    assert len(args) != 0
    self.terminals = []
    for fed_layer in args:
        if isinstance(fed_layer, str):
            try:
                fed_layer = self.layers[fed_layer]
            except KeyError:
                raise KeyError('Unknown layer name fed: %s' % fed_layer)
        self.terminals.append(fed_layer)
    return self

def get_appointed_layer(self, name):
    return self.layers[name]

def get_output(self):
    '''Returns the current network output.'''
    return self.terminals[-1]

def get_unique_name(self, prefix):
    '''Returns an index-suffixed unique name for the given prefix.
    This is used for auto-generating layer names based on the type-prefix.
    '''
    ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
    return '%s_%d' % (prefix, ident)

def make_var(self, name, shape):
    '''Creates a new TensorFlow variable.'''
    return tf.get_variable(name, shape, trainable=self.trainable)

def validate_padding(self, padding):
    '''Verifies that the padding is one of the supported ones.'''
    assert padding in ('SAME', 'VALID')

@layer
def conv(self, input, kernel, output_channel, strides, name,
         relu=True,
         padding='SAME',
         group=1,
         biased=True):

    # Verify that the padding is acceptable
    self.validate_padding(padding)
    # Get the number of channels in the input
    input_channel = input.get_shape().as_list()[-1]
    # Verify that the grouping parameter is valid
    assert input_channel % group == 0
    assert output_channel % group == 0
    # Convolution for a given input and kernel
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, strides[0], strides[1], 1], padding=padding)
    with tf.variable_scope(name) as scope:
        kernel = self.make_var('weights', shape=[kernel[0], kernel[1], input_channel // group, output_channel])
        if group == 1:
            # This is the common-case. Convolve the input without any further complications.
            output = convolve(input, kernel)
        else:
            # Split the input into groups and then convolve each of them independently
            input_groups = tf.split(input, group, 3)
            kernel_groups = tf.split(kernel, group, 3)
            output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
            # Concatenate the groups
            output = tf.concat(output_groups, 3)
        # Add the biases
        if biased:
            biases = self.make_var('biases', [output_channel])
            output = tf.nn.bias_add(output, biases)
        if relu:
            # ReLU non-linearity
            output = tf.nn.relu(output, name=scope.name)
        return output

@layer
def atrous_conv(self, input, kernel, output_channel, dilation, name,
                relu=True,
                padding='SAME',
                group=1,
                biased=True):
    # Verify that the padding is acceptable
    self.validate_padding(padding)
    # Get the number of channels in the input
    input_channel = input.get_shape().as_list()[-1]
    # Verify that the grouping parameter is valid
    assert input_channel % group == 0
    assert output_channel % group == 0
    # Convolution for a given input and kernel
    convolve = lambda i, k: tf.nn.atrous_conv2d(i, k, dilation, padding=padding)
    with tf.variable_scope(name) as scope:
        kernel = self.make_var('weights', shape=[kernel[0], kernel[1], input_channel // group, output_channel])
        if group == 1:
            # This is the common-case. Convolve the input without any further complications.
            output = convolve(input, kernel)
        else:
            # Split the input into groups and then convolve each of them independently
            input_groups = tf.split(input, group, 3)
            kernel_groups = tf.split(kernel, group, 3)
            output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
            # Concatenate the groups
            output = tf.concat(output_groups, 3)
        # Add the biases
        if biased:
            biases = self.make_var('biases', [output_channel])
            output = tf.nn.bias_add(output, biases)
        if relu:
            # ReLU non-linearity
            output = tf.nn.relu(output, name=scope.name)
        return output

@layer
def relu(self, input, name):
    return tf.nn.relu(input, name=name)

@layer
def max_pool(self, input, kernel, strides, name, padding='SAME'):
    self.validate_padding(padding)
    return tf.nn.max_pool(input,
                          ksize=[1, kernel[0], kernel[1], 1],
                          strides=[1, strides[0], strides[1], 1],
                          padding=padding,
                          name=name)

@layer
def avg_pool(self, input, kernel, strides, name, padding='SAME'):
    self.validate_padding(padding)
    return tf.nn.avg_pool(input,
                          ksize=[1, kernel[0], kernel[1], 1],
                          strides=[1, strides[0], strides[1], 1],
                          padding=padding,
                          name=name)

@layer
def global_average_pooling(self, input, name):
    return tf.reduce_mean(input, axis=[1, 2], keep_dims=True, name=name)

# @layer
# This version is not used because, at validation time, the image size is no longer
# fixed, while the ksize argument of avg_pool must be a list of static ints.
# def global_average_pooling(self, input, name):
#     ksize = [1, ] + input.get_shape().as_list()[1:3] + [1, ]
#     return tf.nn.avg_pool(input,
#                           ksize=ksize,
#                           strides=[1, 1, 1, 1],
#                           padding='VALID',
#                           name=name)

@layer
def lrn(self, input, name, radius=None, alpha=None, beta=None, bias=None):
    return tf.nn.lrn(input,
                     depth_radius=radius,
                     alpha=alpha,
                     beta=beta,
                     bias=bias,
                     name=name)

@layer
def concat(self, inputs, axis, name):
    return tf.concat(values=inputs, axis=axis, name=name)

@layer
def add(self, inputs, name):
    return tf.add_n(inputs, name=name)

@layer
def fc(self, input, num_out, name, relu=True):
    with tf.variable_scope(name) as scope:
        input_shape = input.get_shape()
        if input_shape.ndims == 4:
            # The input is spatial. Vectorize it first.
            dim = 1
            for d in input_shape[1:].as_list():
                dim *= d
            feed_in = tf.reshape(input, [-1, dim])
        else:
            feed_in, dim = (input, input_shape[-1].value)
        weights = self.make_var('weights', shape=[dim, num_out])
        biases = self.make_var('biases', [num_out])
        op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
        fc = op(feed_in, weights, biases, name=scope.name)
        return fc

@layer
def softmax(self, input, name):
    input_shape = [v.value for v in input.get_shape()]
    if len(input_shape) > 2:
        # For certain models (like NiN), the singleton spatial dimensions
        # need to be explicitly squeezed, since they're not broadcast-able
        # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
        if input_shape[1] == 1 and input_shape[2] == 1:
            input = tf.squeeze(input, axis=[1, 2])
        else:
            raise ValueError('Rank 2 tensor input expected for softmax!')
    return tf.nn.softmax(input, name=name)

@layer
def batch_normalization(self, input, name, is_training, activation_fn=None, scale=True):
    with tf.variable_scope(name) as scope:
        output = slim.batch_norm(
            input, decay=0.9997,
            activation_fn=activation_fn,
            is_training=is_training,
            updates_collections=None,
            scale=scale,
            scope=scope)
        return output

@layer
def dropout(self, input, keep_prob, name):
    # use_dropout acts as a train/eval switch: keep = 1 (no dropout) when
    # use_dropout is fed 0, and keep = keep_prob when it is fed 1.
    keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
    return tf.nn.dropout(input, keep, name=name)

@layer
def resize(self, input, new_size, name):
    return tf.image.resize_bilinear(input, new_size, name=name)
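A minimal subclass sketch tying the pieces together. NetWork.__init__ calls setup(), so a model only declares its graph there; TinyNet and its shapes are hypothetical:

    class TinyNet(NetWork):
        def setup(self, is_training, num_classes):
            (self.feed('data')
             .conv([3, 3], 32, [1, 1], name='conv1')
             .conv([1, 1], num_classes, [1, 1], relu=False, name='logits'))

    x = tf.placeholder(tf.float32, [None, 64, 64, 3])
    logits = TinyNet(inputs={'data': x}, num_classes=21).get_output()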

train.py

# -*- coding: utf-8 -*-

import os

import numpy as np
import tensorflow as tf

from utils.data_handle import save_weight, load_weight
from utils.image_process import prepare_label, inv_preprocess, decode_labels
from utils.image_reader import ImageReader

def convert_to_calculateloss(raw_output, num_classes, label_batch, isFull=False):
    if isFull:
        raw_groundtruth = tf.reshape(tf.squeeze(label_batch, axis=[3]), [-1, ])
    else:
        label_proc = prepare_label(label_batch, raw_output.get_shape()[1:3],
                                   num_classes=num_classes, one_hot=False)  # [batch_size, h, w]
        raw_groundtruth = tf.reshape(label_proc, [-1, ])

    raw_prediction = tf.reshape(raw_output, [-1, num_classes])

    indices = tf.squeeze(tf.where(tf.less_equal(raw_groundtruth, num_classes - 1)), 1)
    label = tf.cast(tf.gather(raw_groundtruth, indices), tf.int32)  # [?, ]
    logits = tf.gather(raw_prediction, indices)  # [?, num_classes]

    return label, logits
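The tf.where/tf.gather pair is what drops ignore-label pixels: any ground-truth value above num_classes - 1 is excluded from both tensors, so it contributes nothing to the loss or metrics. A small illustration (ignore_label = 255 is an assumption, matching the args.ignore_label argument below):

    # raw_groundtruth = [0, 7, 255, 20], num_classes = 21
    # -> indices = [0, 1, 3]
    # -> label   = [0, 7, 20]; logits keeps only the matching rows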

def train(args, dbargs):

if args.train_on_16:
    from models.deeplabnet_s16 import DeepLabV2, DeepLabV3
    from models.resnet_s16 import ResNet38, ResNet101
    args.batch_size = 10
elif args.train_on_4:
    from models.deeplabnet_s4 import DeepLabV2, DeepLabV3
    from models.resnet_s4 import ResNet38, ResNet101
    args.batch_size = 1
else:
    from models.deeplabnet_s8 import DeepLabV2, DeepLabV3
    from models.resnet_s8 import ResNet38, ResNet101
    args.batch_size = 3

def choose_model(model_name, base_model, image_batch):
    def choose_base_model(base_model):
        if base_model == 'resnet38':
            net = ResNet38(inputs={'data': image_batch}).terminals[-1]
        elif base_model == 'resnet101':
            net = ResNet101(inputs={'data': image_batch}).terminals[-1]
        else:
            raise ValueError('unknown base model: %s' % base_model)
        return net

    net = choose_base_model(base_model)
    if model_name == 'deeplabv2':
        net = DeepLabV2(inputs={net.op.name: net})
    elif model_name == 'deeplabv3':
        net = DeepLabV3(inputs={net.op.name: net})

    return net

## set hyperparameters
img_mean = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
tf.set_random_seed(args.random_seed)
coord = tf.train.Coordinator()

## load data
with tf.name_scope("create_inputs"):
    reader = ImageReader(
        args.data_dir,
        args.img_size,
        args.random_scale,
        args.random_mirror,
        args.random_crop,
        args.ignore_label,
        args.is_val,
        img_mean,
        coord)
    image_batch, label_batch = reader.dequeue(args.batch_size)

## load model
net = choose_model(args.model_name, args.base_model, image_batch)
raw_output = net.get_output()

predict_batch = net.topredict(raw_output, tf.shape(image_batch)[1:3])

# logits at the network output stride (e.g. 513 / 16 for the s16 model)
label, logits = convert_to_calculateloss(raw_output, args.num_classes, label_batch)

# logits upsampled to the full input resolution (513)
labels_full, logits_full = convert_to_calculateloss(
    tf.image.resize_bilinear(raw_output, tf.shape(label_batch)[1:3]),
    args.num_classes, label_batch, isFull=True)
predict_label = tf.argmax(logits, axis=1)
predict_full_label = tf.argmax(logits_full, axis=1)
print("Model load completed!")

## get all kinds of variables list
def printV(var_list):
    for var in var_list:
        print(var)
    print("--------------------------------")

basemodel_var = [v for v in tf.global_variables() if 'fc' not in v.name]  # restore pretrained model
if args.is_training:
    all_trainable_var = [v for v in tf.trainable_variables()]
    print('batch normalization parameters are trained with decay = 0.9997')
else:
    all_trainable_var = [v for v in tf.trainable_variables() if 'beta' not in v.name and 'gamma' not in v.name]
    print('batch normalization parameters are frozen')
conv_trainable_var = [v for v in all_trainable_var if 'fc' not in v.name]
fc_trainable_var = [v for v in all_trainable_var if 'fc' in v.name]
fc_trainable_w_var = [v for v in fc_trainable_var if 'weight' in v.name]
fc_trainable_b_var = [v for v in fc_trainable_var if 'biases' in v.name]

## set loss
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits))
#l2_loss = [args.weight_decay * tf.nn.l2_loss(w) for w in fc_trainable_w_var]
#loss = tf.add(loss, tf.add_n(l2_loss))
loss_var, loss_op = tf.metrics.mean(loss)

# mIoU at the network output stride (16)
iou_var, iou_op = tf.metrics.mean_iou(label, predict_label, args.num_classes)

# mIoU at full resolution (output stride 1)
iou_full_var, iou_full_op = tf.metrics.mean_iou(labels_full, predict_full_label, args.num_classes)

accuracy_var, acc_op = tf.metrics.accuracy(label, predict_label)
metrics_op = tf.group(loss_op, iou_op, iou_full_op, acc_op)

## set optimizer
iterstep = tf.placeholder(dtype=tf.float32, shape=[], name='iteration_step')

base_lr = tf.constant(args.learning_rate, dtype=tf.float32, shape=[])
lr = tf.scalar_mul(base_lr,
                   tf.pow((1 - iterstep / args.num_steps), args.power))  # 'poly' policy: lr decays smoothly to 0 at num_steps

train_basemodel_op = tf.train.MomentumOptimizer(learning_rate=lr,
                                                momentum=args.momentum).minimize(loss,
                                                                                 var_list=conv_trainable_var)
train_fc_op = tf.train.MomentumOptimizer(learning_rate=lr,
                                         momentum=args.momentum).minimize(loss,
                                                                          var_list=fc_trainable_var)
train_fc_w_op = tf.train.MomentumOptimizer(learning_rate=lr * 10,
                                           momentum=args.momentum).minimize(loss,
                                                                            var_list=fc_trainable_w_var)
train_fc_b_op = tf.train.MomentumOptimizer(learning_rate=lr * 20,
                                           momentum=args.momentum).minimize(loss,
                                                                            var_list=fc_trainable_b_var)
train_all_op = tf.group(train_basemodel_op, train_fc_op)

# train_all_op = tf.group(train_basemodel_op, train_fc_w_op, train_fc_b_op)
# train_fc_op = tf.group(train_fc_w_op, train_fc_b_op)

## set summary
vs_image = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, img_mean], tf.uint8)
vs_label = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)
vs_predict = tf.py_func(decode_labels, [predict_batch, args.save_num_images, args.num_classes], tf.uint8)
tf.summary.image(name='image_collection_train', tensor=tf.concat(axis=2, values=[vs_image, vs_label, vs_predict]),
                 max_outputs=args.save_num_images)
tf.summary.scalar('loss_train', loss_var)
tf.summary.scalar('iou_belief_train', iou_var)
tf.summary.scalar('iou_full_train', iou_full_var)
tf.summary.scalar('accuracy_train', accuracy_var)

summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(dbargs['log_dir'], graph=tf.get_default_graph(), max_queue=5)

## set session
print("GPU")
os.system("echo $CUDA_VISIBLE_DEVICES")

sess = tf.Session()
global_init = tf.global_variables_initializer()
local_init = tf.local_variables_initializer()
sess.run(global_init)
sess.run(local_init)

## set saver
saver_g = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
trained_step = 0
if os.path.exists(dbargs['restore_from'] + 'checkpoint'):
    trained_step = load_weight(dbargs['restore_from'], saver_g, sess)
else:
    saver_b = tf.train.Saver(var_list=basemodel_var)
    load_weight(dbargs['baseweight_from'], saver_b, sess)

with open(dbargs['restore_from']+'info.me', 'w') as info:
    for k, v in vars(args).items():
        info.write('%s: %s\n' % (str(k), str(v)))

threads = tf.train.start_queue_runners(sess, coord)
print("start training")

## start training
for step in range(args.num_steps):
    if args.from_epoch_0:
        now_step = step
    else:
        now_step = int(trained_step) + step if trained_step is not None else step
    if now_step >= args.num_steps:
        break
    feed_dict = {iterstep: now_step}
    label_batch_, losses, lrate, iou, _, _ = sess.run([label_batch, loss_var, lr, iou_var, train_all_op, metrics_op], feed_dict)
    if step % args.save_pred_every == 0:
        save_weight(dbargs['restore_from'], saver_g, sess, now_step)

    if step % 50 == 0:
        print('step:{}\tlr {}\tloss = {}\tious:{}'.format(now_step, lrate, losses, iou))
        summary_str = sess.run(summary_op, feed_dict)
        summary_writer.add_summary(summary_str, now_step)
        sess.run(local_init)

## end training
coord.request_stop()
coord.join(threads)
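
The lr tensor above implements the standard 'poly' schedule used by the DeepLab papers, lr = base_lr * (1 - step / num_steps)^power, which decays smoothly from the full base rate to zero at the last step. A standalone sketch with hypothetical values:

def poly_lr(base_lr, step, num_steps, power=0.9):
    # 'poly' policy: full base_lr at step 0, exactly 0 at step == num_steps
    return base_lr * (1.0 - step / float(num_steps)) ** power

for s in (0, 5000, 10000, 19999):
    print(s, poly_lr(2.5e-4, s, 20000))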

gen_trainIDs.py

from PIL import Image
import numpy as np

label_list='/data/rui.wu/irfan/gan_seg/DAG4Seg/D_deeplab/dataset/val_label.txt'
id_to_trainid = {-1: 255, 0: 255, 1: 255, 2: 255,
                 3: 255, 4: 255, 5: 255, 6: 255,
                 7: 0, 8: 1, 9: 255, 10: 255, 11: 2, 12: 3, 13: 4,
                 14: 255, 15: 255, 16: 255, 17: 5,
                 18: 255, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                 28: 15, 29: 255, 30: 255, 31: 16, 32: 17, 33: 18}
label_dir="/data/rui.wu/Elijha/dataset/gtFine/gtFine/val/munster/munster_000100_000019_gtFine_labelIds.png"
mask = Image.open(label_dir)
mask = np.array(mask)
mask_copy = mask.copy()
for k, v in id_to_trainid.items():
    mask_copy[mask == k] = v
mask = Image.fromarray(mask_copy.astype(np.uint8))
print('done!')
mask.save(label_dir.replace('labelIds','labelTrainIds'))

# f = open(label_list, 'r')
# for line in f:
#     label_dir = line.replace('\n', '')
#     mask = Image.open(label_dir)
#     mask = np.array(mask)
#     mask_copy = mask.copy()
#     for k, v in id_to_trainid.items():
#         mask_copy[mask == k] = v
#     mask = Image.fromarray(mask_copy.astype(np.uint8))
#     print('done!' + line)
#     mask.save(label_dir.replace('labelIds', 'labelTrainIds'))
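
Since label ids fit in a byte, the per-key loop can also be replaced by a single 256-entry lookup table, which remaps a full-resolution Cityscapes mask in one vectorized indexing step. A hedged alternative sketch, reusing id_to_trainid and label_dir from the script above:

import numpy as np
from PIL import Image

lut = np.full(256, 255, dtype=np.uint8)  # unmapped ids default to ignore (255)
for k, v in id_to_trainid.items():
    if k >= 0:  # -1 ('unlabeled') already falls back to 255
        lut[k] = v

mask = np.array(Image.open(label_dir))
train_ids = lut[mask]  # remap every pixel at once
Image.fromarray(train_ids).save(label_dir.replace('labelIds', 'labelTrainIds'))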

deeplabv3_s4

# -*- coding: utf-8 -*-

import tensorflow as tf

from models.network import NetWork

class DeepLabV2(NetWork):
def setup(self, is_training, num_classes):
    inputs = self.inputs.popitem()[0]
    assert type(inputs) == str
    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 6, padding='SAME', relu=False, name='fc1_voc12_c0'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 12, padding='SAME', relu=False, name='fc1_voc12_c1'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 18, padding='SAME', relu=False, name='fc1_voc12_c2'))

    (self.feed(inputs)
     .atrous_conv([3, 3], num_classes, 24, padding='SAME', relu=False, name='fc1_voc12_c3'))

    (self.feed('fc1_voc12_c0',
               'fc1_voc12_c1',
               'fc1_voc12_c2',
               'fc1_voc12_c3')
     .add(name='fc1_voc12'))

def topredict(self, raw_output, origin_shape):
    raw_output = tf.image.resize_bilinear(raw_output, origin_shape)
    raw_output = tf.argmax(raw_output, axis=3)
    prediction = tf.expand_dims(raw_output, axis=3)
    return prediction
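
The four branches above are the DeepLab v2 ASPP head: parallel 3x3 atrous convolutions at rates 6/12/18/24 whose per-class logits are summed. Dilation widens a kernel's spatial extent without adding parameters; for a k x k kernel the effective size is k + (k - 1)(rate - 1). A quick check:

def effective_kernel(k, rate):
    # effective spatial extent of a k x k convolution dilated by `rate`
    return k + (k - 1) * (rate - 1)

for rate in (6, 12, 18, 24):
    print(rate, effective_kernel(3, rate))  # -> 13, 25, 37, 49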

class DeepLabV3(NetWork):
def setup(self, is_training, num_classes):
    inputs = self.inputs.popitem()[0]
    assert type(inputs) == str

    (self.feed(inputs)
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6a_branch2a')
     .atrous_conv([3, 3], 512, 16, padding='SAME', biased=False, relu=False, name='fc_res6a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6a_branch2c'))

    (self.feed(inputs,
               'fc_bn6a_branch2c')
     .add(name='fc_res6a')
     .relu(name='fc_res6a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6b_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res6b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6b_branch2c'))

    (self.feed('fc_res6a_relu',
               'fc_bn6b_branch2c')
     .add(name='fc_res6b')
     .relu(name='fc_res6b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res6c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6c_branch2a')
     .atrous_conv([3, 3], 512, 64, padding='SAME', biased=False, relu=False, name='fc_res6c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn6c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res6c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn6c_branch2c'))

    (self.feed('fc_res6b_relu',
               'fc_bn6c_branch2c')
     .add(name='fc_res6c')
     .relu(name='fc_res6c_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7a_branch2a')
     .atrous_conv([3, 3], 512, 32, padding='SAME', biased=False, relu=False, name='fc_res7a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7a_branch2c'))

    (self.feed('fc_res6c_relu',
               'fc_bn7a_branch2c')
     .add(name='fc_res7a')
     .relu(name='fc_res7a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7b_branch2a')
     .atrous_conv([3, 3], 512, 64, padding='SAME', biased=False, relu=False, name='fc_res7b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7b_branch2c'))

    (self.feed('fc_res7a_relu',
               'fc_bn7b_branch2c')
     .add(name='fc_res7b')
     .relu(name='fc_res7b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res7c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7c_branch2a')
     .atrous_conv([3, 3], 512, 128, padding='SAME', biased=False, relu=False, name='fc_res7c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn7c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res7c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn7c_branch2c'))

    (self.feed('fc_res7b_relu',
               'fc_bn7c_branch2c')
     .add(name='fc_res7c')
     .relu(name='fc_res7c_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8a_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8a_branch2a')
     .atrous_conv([3, 3], 512, 64, padding='SAME', biased=False, relu=False, name='fc_res8a_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8a_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8a_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8a_branch2c'))

    (self.feed('fc_res7c_relu',
               'fc_bn8a_branch2c')
     .add(name='fc_res8a')
     .relu(name='fc_res8a_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8b_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8b_branch2a')
     .atrous_conv([3, 3], 512, 128, padding='SAME', biased=False, relu=False, name='fc_res8b_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8b_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8b_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8b_branch2c'))

    (self.feed('fc_res8a_relu',
               'fc_bn8b_branch2c')
     .add(name='fc_res8b')
     .relu(name='fc_res8b_relu')
     .conv([1, 1], 512, [1, 1], biased=False, relu=False, name='fc_res8c_branch2a')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8c_branch2a')
     .atrous_conv([3, 3], 512, 256, padding='SAME', biased=False, relu=False, name='fc_res8c_branch2b')
     .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='fc_bn8c_branch2b')
     .conv([1, 1], 2048, [1, 1], biased=False, relu=False, name='fc_res8c_branch2c')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc_bn8c_branch2c'))

    # gzy
    (self.feed('fc_res8b_relu',
               'fc_bn8c_branch2c') 
     .add(name='fc_res8c')
     .relu(name='fc_res8c_relu'))
     

    (self.feed('fc_res8b_relu')  # slightly modified here; original input was fc_res8c_relu
     .atrous_conv([3, 3], 256, 24, padding='SAME', relu=False, name='fc1_voc12_c0')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn0'))

    (self.feed('fc_res8b_relu')
     .atrous_conv([3, 3], 256, 48, padding='SAME', relu=False, name='fc1_voc12_c1')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn1'))

    (self.feed('fc_res8b_relu')
     .atrous_conv([3, 3], 256, 72, padding='SAME', relu=False, name='fc1_voc12_c2')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn2'))

    (self.feed('fc_res8b_relu')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc1_voc12_c3')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn3'))

    layer = self.get_appointed_layer('fc_res8c_relu')
    new_shape = tf.shape(layer)[1:3]
    (self.feed('fc_res8b_relu')
     .global_average_pooling(name='fc1_voc12_mp0')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc1_voc12_c4')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc1_voc12_bn4')
     .resize(new_shape, name='fc1_voc12_bu0'))

    (self.feed('fc1_voc12_bn0',
               'fc1_voc12_bn1',
               'fc1_voc12_bn2',
               'fc1_voc12_bn3',
               'fc1_voc12_bu0')
     .concat(axis=3, name='fc1_voc12'))

    (self.feed('fc1_voc12')
     .conv([1, 1], 256, [1, 1], relu=False, name='fc2_voc12_c0')
     .batch_normalization(is_training=is_training, activation_fn=None, name='fc2_voc12_bn0')
     .conv([1, 1], num_classes, [1, 1], relu=False, name='fc2_voc12_c1'))

def topredict(self, raw_output, origin_shape):
    raw_output = tf.image.resize_bilinear(raw_output, origin_shape)
    raw_output = tf.argmax(raw_output, axis=3)
    prediction = tf.expand_dims(raw_output, axis=3)
    return prediction
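
For comparison, the ASPP head this DeepLabV3 class builds boils down to the pattern below: three atrous branches (rates 24/48/72, scaled up for the stride-4 variant), a 1x1 branch, and an image-level pooling branch resized back to the feature-map size, all concatenated and projected to class logits. A minimal plain-TensorFlow 1.x sketch, with the batch normalization layers omitted for brevity:

import tensorflow as tf

def aspp_head(features, num_classes, depth=256):
    # features: [N, H, W, C] backbone output
    size = tf.shape(features)[1:3]
    branches = [tf.layers.conv2d(features, depth, 1, padding='same')]  # fc1_voc12_c3 analogue
    for rate in (24, 48, 72):  # rates from fc1_voc12_c0..c2 above
        branches.append(tf.layers.conv2d(features, depth, 3,
                                         dilation_rate=rate, padding='same'))
    pooled = tf.reduce_mean(features, axis=[1, 2], keepdims=True)  # global average pool
    pooled = tf.layers.conv2d(pooled, depth, 1)
    branches.append(tf.image.resize_bilinear(pooled, size))  # broadcast back spatially
    net = tf.concat(branches, axis=3)
    net = tf.layers.conv2d(net, depth, 1)          # fc2_voc12_c0 analogue
    return tf.layers.conv2d(net, num_classes, 1)   # fc2_voc12_c1 analogue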
