Add file via upload
This commit is contained in:
parent
4844cd73c9
commit
27706881b9
|
@ -0,0 +1,151 @@
|
|||
import os

# Backend must be selected before tensorlayer is imported.
os.environ['TL_BACKEND'] = 'tensorflow'

import time
from typing import Any

import numpy as np
import tensorlayer as tl
from torchsummary import summary
from tensorlayer import logging
from tensorlayer.files import (assign_weights, load_cifar10_dataset,)
from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Elementwise, GlobalMeanPool2d, Input, MaxPool2d, MeanPool2d, AdaptiveMeanPool2d)
from tensorlayer.layers import Module, SequentialLayer
|
||||
class _DenseLayer(Module):
    """Composite layer: BN-ReLU -> 1x1 conv (bottleneck) -> BN-ReLU -> 3x3 conv,
    followed by an elementwise residual add with the layer input.

    NOTE(review): the elementwise add requires ``growth_rate == in_channels``;
    a canonical DenseNet layer *concatenates* instead — confirm the intended
    wiring before training.

    Parameters
    ----------
    in_channels : int
        Channels of the incoming tensor.
    growth_rate : int
        Channels produced by the final 3x3 conv.
    bn_size : int
        Bottleneck width multiplier (1x1 conv emits ``bn_size * growth_rate``).
    memory_efficient : bool
        Accepted for API parity; unused here.
    """

    def __init__(self, in_channels, growth_rate, bn_size, memory_efficient=False):
        super(_DenseLayer, self).__init__()
        # BUG FIX: bn1 normalizes `inputs` (in_channels channels), so its
        # num_features must be in_channels (was bn_size * growth_rate).
        self.bn1 = BatchNorm(num_features=in_channels, act='relu')
        self.conv1 = Conv2d(growth_rate * bn_size, (1, 1), in_channels=in_channels,
                            strides=(1, 1), padding='SAME', b_init=None)
        # BUG FIX: bn2 normalizes the bottleneck output
        # (bn_size * growth_rate channels); it was num_features=growth_rate.
        self.bn2 = BatchNorm(num_features=bn_size * growth_rate, act='relu')
        self.conv2 = Conv2d(growth_rate, (3, 3), in_channels=bn_size * growth_rate,
                            strides=(1, 1), padding='SAME', b_init=None)
        self.add = Elementwise(tl.add, act='relu')

    def forward(self, inputs):
        output = self.bn1(inputs)
        output = self.conv1(output)
        output = self.bn2(output)
        output = self.conv2(output)
        # Residual connection (see channel-count note in the class docstring).
        result = self.add([output, inputs])
        return result
||||
class _DenseBlock(Module):
    """A stack of ``num_layers`` _DenseLayer modules applied sequentially."""

    def __init__(self, num_layers, in_channels, bn_size, growth_rate):
        super(_DenseBlock, self).__init__()
        self.layers = []
        for i in range(num_layers):
            # Each layer is constructed as if the channel count grows by
            # growth_rate per preceding layer.
            # NOTE(review): _DenseLayer's elementwise add does not actually
            # grow the channel count — confirm intended wiring.
            self.layers.append(
                _DenseLayer(in_channels + i * growth_rate, growth_rate, bn_size))
        self.dense_block = SequentialLayer(self.layers)

    def forward(self, inputs):
        # BUG FIX: the original forward() took no input and returned the
        # SequentialLayer object itself instead of applying it.
        return self.dense_block(inputs)
|
||||
|
||||
|
||||
class _Transition(Module):
    """Transition between dense blocks: BN-ReLU -> 1x1 conv -> 2x2 mean-pool.

    Parameters
    ----------
    num_input_features : int
        Channels of the incoming tensor.
    num_output_features : int
        Channels after the 1x1 convolution (typically half the input).
    """

    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        # BUG FIX: BN runs on the incoming tensor, so num_features must be
        # num_input_features (was num_output_features).
        self.bn1 = BatchNorm(num_features=num_input_features, act='relu')
        # BUG FIX: the DenseNet transition conv is 1x1; the original omitted
        # the filter size and fell back to Conv2d's default.
        self.conv1 = Conv2d(num_output_features, (1, 1),
                            in_channels=num_input_features)
        self.pooling = MeanPool2d((2, 2), strides=(2, 2), padding='SAME')

    def forward(self, inputs):
        # BUG FIX: the original referenced undefined `self.bn`.
        output = self.bn1(inputs)
        output = self.conv1(output)
        output = self.pooling(output)
        return output
|
||||
|
||||
|
||||
class Densenet(Module):
    """DenseNet backbone: stem conv -> dense blocks with transitions -> classifier.

    Parameters
    ----------
    growth_rate : int
        Channels added per dense layer.
    block_config : tuple of int
        Number of dense layers in each dense block.
    bn_size : int
        Bottleneck width multiplier inside each dense layer.
    num_classes : int
        Output units of the final classifier.
    num_init_features : int
        Channels produced by the stem convolution.
    """

    def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16),
                 bn_size=4, num_classes=10, num_init_features=24):
        super(Densenet, self).__init__()
        # (removed dead assignment `num_init_feature = 2 * growth_rate`;
        # it shadowed nothing and was never read)

        # Stem: 7x7/2 conv -> BN+ReLU -> 3x3/2 max-pool.
        # NOTE(review): padding normalized to uppercase 'VALID' to match the
        # 'SAME' spelling used everywhere else in this file — confirm the
        # backend accepted lowercase "valid".
        self.conv1 = Conv2d(num_init_features, (7, 7), strides=(2, 2),
                            padding='VALID', in_channels=3)
        self.bn1 = BatchNorm(num_features=num_init_features, act='relu')
        self.pooling = MaxPool2d((3, 3), strides=(2, 2))

        self.layers_list = []
        num_input_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers, num_input_features, bn_size, growth_rate)
            self.layers_list.append(block)
            num_input_features = num_input_features + growth_rate * num_layers
            # Halve the channel count between blocks (no transition after the last).
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_input_features,
                                    num_output_features=num_input_features // 2)
                self.layers_list.append(trans)
                num_input_features = num_input_features // 2
        self.layers_list.append(BatchNorm(num_features=num_input_features, act='relu'))
        self.layers_list.append(AdaptiveMeanPool2d((1, 1)))
        self.features = SequentialLayer(self.layers_list)

        # Channel count feeding the classifier; forward() needs it to flatten.
        self.num_features = num_input_features
        self.classifier = Dense(in_channels=num_input_features, n_units=num_classes)

    def forward(self, x):
        output = self.conv1(x)
        output = self.bn1(output)
        output = self.pooling(output)
        output = self.features(output)
        # BUG FIX: the original called torch-style `.view()` on the undefined
        # name `features`. Flatten the pooled (N, 1, 1, C) tensor instead.
        # TODO(review): confirm tl.ops.reshape is the flatten API for this
        # TensorLayer version.
        output = tl.ops.reshape(output, shape=[-1, self.num_features])
        output = self.classifier(output)
        return output
|
||||
|
||||
def DenseNet(s: str):
    """Build a Densenet configuration by name.

    Parameters
    ----------
    s : str
        One of 'densenet-100' or 'densenet-121'.

    Raises
    ------
    ValueError
        For unknown names (the original silently returned None).
    """
    if s == 'densenet-100':
        return Densenet(growth_rate=32, block_config=(6, 12, 18, 16),
                        num_init_features=64, num_classes=10)
    if s == 'densenet-121':
        return Densenet(growth_rate=32, block_config=(6, 12, 24, 16),
                        num_init_features=64, num_classes=1000)
    raise ValueError(f"unknown DenseNet variant: {s!r}")
|
||||
|
||||
def densenet121():
    """DenseNet-121 (ImageNet configuration).

    BUG FIX: the original called the undefined lowercase name `densenet`.
    """
    return Densenet(growth_rate=32, block_config=(6, 12, 24, 16),
                    num_init_features=64, num_classes=1000)
|
||||
|
||||
|
||||
def densenet161(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Densenet":
    """DenseNet-161 (ImageNet configuration).

    `pretrained` and `progress` are accepted for torchvision API parity but
    are unused here (no weight download is implemented).
    BUG FIX: the original called undefined `densenet` and annotated the
    return type as the factory *function* `DenseNet`.
    """
    return Densenet(growth_rate=48, block_config=(6, 12, 36, 24),
                    num_init_features=96, num_classes=1000, **kwargs)
|
||||
|
||||
|
||||
|
||||
def densenet169(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Densenet":
    """DenseNet-169 (ImageNet configuration).

    `pretrained` and `progress` are accepted for torchvision API parity but
    are unused here (no weight download is implemented).
    BUG FIX: the original called undefined `densenet` and annotated the
    return type as the factory *function* `DenseNet`.
    """
    return Densenet(growth_rate=32, block_config=(6, 12, 32, 32),
                    num_init_features=64, num_classes=1000, **kwargs)
|
||||
|
||||
def densenet201(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Densenet":
    """DenseNet-201 (ImageNet configuration).

    `pretrained` and `progress` are accepted for torchvision API parity but
    are unused here (no weight download is implemented).
    BUG FIX: the original called undefined `densenet` and annotated the
    return type as the factory *function* `DenseNet`.
    """
    return Densenet(growth_rate=32, block_config=(6, 12, 48, 32),
                    num_init_features=64, num_classes=1000, **kwargs)
|
||||
|
||||
|
||||
|
||||
# Module-level setup: verbose logging, then load CIFAR-10 as NHWC arrays.
tl.logging.set_verbosity(tl.logging.DEBUG)
# shape=(-1, 32, 32, 3): channels-last images; plotable=False skips preview plots.
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
|
||||
|
||||
def generator_train():
    """Yield one (image, label) pair at a time from the CIFAR-10 training set.

    Reads the module-level ``X_train`` / ``y_train`` arrays; labels are wrapped
    in ``np.array`` so downstream dataflow ops see array-typed targets.
    """
    samples, labels = X_train, y_train
    if len(samples) != len(labels):
        raise AssertionError("The length of inputs and targets should be equal")
    for sample, label in zip(samples, labels):
        yield (sample, np.array(label))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Build the CIFAR-10 variant of the network via the string factory.
    net = DenseNet('densenet-100')
    # Training hyperparameters.
    n_epoch = 500
    batch_size = 128
    print_freq = 2          # log every `print_freq` epochs
    shuffle_buffer_size = 128
    train_weights = net.trainable_weights
    # SGD with momentum: lr=0.05, momentum=0.9.
    optimizer = tl.optimizers.Momentum(0.05, 0.9)
    # Wrap the Python generator as a TensorLayer dataflow pipeline:
    # generator -> shuffle -> batch.
    train_ds = tl.dataflow.FromGenerator(
        generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label']
    )
    train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size)
    train_ds = tl.dataflow.Batch(train_ds,batch_size)
    # High-level trainer with softmax cross-entropy loss.
    model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer)
    model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False)
    # Round-trip the weights to disk as a named npz archive.
    model.save_weights('./DenseNet.npz', format='npz_dict')
    model.load_weights('./DenseNet.npz', format='npz_dict')
|
||||
|
Loading…
Reference in New Issue