add dacsdc model quant check

fffasttime 2022-11-14 21:03:38 +08:00
parent e871c91f6e
commit 71737900d1
4 changed files with 32 additions and 14 deletions

View File

@@ -195,7 +195,7 @@ class QuantActivConv2d(nn.Module):
 tmp = torch.tensor(self.filter_size * in_shape[-1] * in_shape[-2], dtype=torch.float)
 self.size_product.copy_(tmp)
 out = self.activ(input)
-## print('ii',input[0,0,:,:]/self.activ.step)
+## print('ii',input[0,0,:,0]/self.activ.step)
 ## print('convi', torch.round(out[0,0,:,0]/self.activ.step).int())
 ## wstd = self.conv.weight.std()
 out = self.conv(out)
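The debug prints in this hunk inspect the quantized activations as integer codes by dividing by the activation step. A minimal sketch of that check, written independently of the repository (the helper name and tolerance are my own illustration):

import torch

def check_on_grid(x: torch.Tensor, step: float, tol: float = 1e-4) -> bool:
    # a properly quantized tensor divided by its step should land on integers
    codes = x / step
    return bool(torch.allclose(codes, torch.round(codes), atol=tol))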

View File

@@ -139,6 +139,7 @@ def extract_model(in_shape):
 elif isinstance(sub_module, torch.nn.MaxPool2d):
     feature_map_shape[1] = feature_map_shape[1] // sub_module.kernel_size
     feature_map_shape[2] = feature_map_shape[2] // sub_module.kernel_size
+    model_param[-1].max_pool = True
 if not hasattr(model_param[0], 'abit'): # train code rescaled [0,255] to [0,1) by /256 default
     model_param[0].abit = 8
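The added model_param[-1].max_pool = True tags the preceding conv entry so the simulator knows a max-pool follows it, matching the (currently commented-out) max_pool handling in the QConvLayer hunk further down. A small sketch of the shape bookkeeping this branch performs, with an assumed [C, H, W] list:

import torch.nn as nn

feature_map_shape = [64, 160, 320]          # assumed [C, H, W] after the previous layer
pool = nn.MaxPool2d(kernel_size=2)
feature_map_shape[1] //= pool.kernel_size   # H halves: 160 -> 80
feature_map_shape[2] //= pool.kernel_size   # W halves: 320 -> 160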
@@ -171,7 +172,7 @@ def process_batchnorm(model_param):
 incbit = len(bit(inc)); biasbit = len(bit(bias))
 larger lshift is better, but MBIT+incbit<48
 '''
-lshift = 8
+lshift = 16
 for conv in model_param[:-1]:
     print(f'Process bn_{conv.n}, shape {conv.bn_w.shape},', end = ' ')
@@ -182,6 +183,8 @@ def process_batchnorm(model_param):
 ostep = conv.ostep
 inc_raw = conv.bn_w * MACstep / ostep
 bias_raw = conv.bn_b / ostep
+conv.inc_raw = inc_raw
+conv.bias_raw = bias_raw
 # Quantization
 T = lshift+conv.wbit+conv.abit-1
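The new inc_raw/bias_raw attributes keep the unrounded per-channel multiplier and offset next to their fixed-point counterparts, so the inference simulator can run either version. A sketch of the fixed-point side under the usual scheme (round to the nearest multiple of 2**-lshift); the helper below is my own illustration, not the repository's routine:

import numpy as np

def to_fixed_point(raw, lshift=16):
    # scale by 2**lshift and round; the simulator undoes this with a rounding right shift
    q = np.round(raw * (1 << lshift)).astype(np.int64)
    bits = int(np.abs(q).max()).bit_length() + 1   # +1 for the sign bit
    return q, bits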
@@ -198,6 +201,7 @@ def process_batchnorm(model_param):
 conv_last.inc = None
 conv_last.div = 1/(conv_last.wstep * conv_last.astep)
 conv_last.bias = np.round(conv_last.convbias * conv_last.div).astype(np.int64)
+conv_last.bias_raw = conv_last.convbias * conv_last.div
 conv_last.biasbit = bitlength(conv_last.bias)
 print(f'conv_last biasbit {conv_last.biasbit}, div {conv_last.div}')
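For the last layer, inc stays None and only the bias is rounded, so keeping bias_raw makes it easy to see what that rounding costs. A one-line check along those lines (illustrative only):

import numpy as np

def bias_round_error(bias_raw, bias):
    return float(np.abs(bias_raw - bias).max())   # worst-case rounding error, in output LSBs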

View File

@@ -24,17 +24,30 @@ class QConvLayer:
 x = F.conv2d(x, self.w, bias=None, stride=self.conv.s, padding=self.conv.p) # [N, OCH, OROW, OCOL]
 # print('convo', self.conv.n, x[0,0,:,0])
 och = x.shape[1]
-if self.conv.inc is not None:
-    inc_ch = self.conv.inc.reshape((1, och, 1, 1))
-    x *= inc_ch
+if False:
+    if self.conv.inc is not None:
+        inc_ch = self.conv.inc.reshape((1, och, 1, 1))
+        x *= inc_ch
-if hasattr(self.conv, 'bias'):
-    bias_ch = self.conv.bias.reshape((1, och, 1, 1))
-    x += bias_ch
+    if hasattr(self.conv, 'bias'):
+        bias_ch = self.conv.bias.reshape((1, och, 1, 1))
+        x += bias_ch
-if hasattr(self.conv, 'lshift'):
-    x += 1 << self.conv.lshift_T-1
-    x >>= self.conv.lshift_T
-# print('biaso', self.conv.n, x[0,0,:,:]/2**self.conv.lshift_T)
+    if hasattr(self.conv, 'lshift'):
+        x += 1 << self.conv.lshift_T-1
+        x >>= self.conv.lshift_T
+else: ## no inc/bias quantization
+    if self.conv.inc is not None:
+        inc_ch = self.conv.inc_raw.reshape((1, och, 1, 1))
+        x *= inc_ch
+    if hasattr(self.conv, 'bias'):
+        bias_ch = self.conv.bias_raw.reshape((1, och, 1, 1))
+        x += bias_ch
+# if hasattr(self.conv, 'max_pool'): # maxpool
+# x = F.max_pool2d(x, kernel_size = 2, stride = 2)
+# print('biaso', self.conv.n, x[0,0,:,0])
 x = torch.round(x).to(dtype = torch.int64)
 if hasattr(self.conv, 'obit'):
     x.clip_(0, 2**(self.conv.obit)-1)
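With the if False: toggle, the integer inc/bias path (multiply, add, rounding right shift) can be swapped for the float inc_raw/bias_raw path, which is what makes the quant check possible: feed the same accumulator through both and compare. A self-contained sketch of that comparison; the tensors and the lshift value are stand-ins, and lshift here plays the role of the commit's lshift_T:

import torch

def quant_check(acc, inc, bias, inc_raw, bias_raw, lshift):
    och = acc.shape[1]
    # integer path: per-channel multiply, add, then rounding right shift
    q = acc.to(torch.int64) * inc.reshape(1, och, 1, 1) + bias.reshape(1, och, 1, 1)
    q = (q + (1 << (lshift - 1))) >> lshift
    # float path: apply the unquantized per-channel scale/offset, then round
    f = acc.to(torch.float64) * inc_raw.reshape(1, och, 1, 1) + bias_raw.reshape(1, och, 1, 1)
    f = torch.round(f)
    return (q.to(torch.float64) - f).abs().max().item()   # largest per-element mismatch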

View File

@@ -109,6 +109,7 @@ def test(weights=None,
          model=None,
          dataloader=None,
          num_batch=-1):
+    # torch.set_default_tensor_type(torch.DoubleTensor)
     # Initialize/load model and set device
     if model is None or type(model)==str:
         device = torch_utils.select_device(opt.device, batch_size=batch_size)
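The commented torch.set_default_tensor_type(torch.DoubleTensor) line presumably lets the whole check be rerun in float64 to rule out float32 rounding as a source of mismatch; a hedged equivalent using the newer API:

import torch

torch.set_default_dtype(torch.float64)   # newly created float tensors default to double precision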
@@ -202,7 +203,7 @@ if __name__ == '__main__':
     parser.add_argument('--img-size', type=int, default=320, help='inference size (pixels)')
     parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
     parser.add_argument('--datapath', default='../../dacsdc_dataset', help = 'test dataset path')
-    parser.add_argument('--verbose', action='store_true', help = 'show predict value result')
+    parser.add_argument('-v', '--verbose', action='store_true', help = 'show predict value result')
     parser.add_argument('--save-pic', action='store_true', help = 'save predict output picture')
     parser.add_argument('-nb', '--num-batch', type=int, default='-1', help='num of batchs to run, -1 for full dataset')
     opt = parser.parse_args()
@@ -217,4 +218,4 @@
         opt.model,
         num_batch = opt.num_batch)
-    print(('%s %s.pt\niou %.4f, lsum %.4f, lobj %.4f, lcls %.4f')%(opt.model, opt.weight, *res))
+    print(('%s %s.pt\niou %.5f, lsum %.4f, lobj %.4f, lcls %.4f')%(opt.model, opt.weight, *res))
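With the new -v alias and -nb option, a run of the evaluation script might look like the line below; the script name test.py is a placeholder, since the diff does not show file names, and only flags visible in this hunk are used:

python test.py -v -nb 10 --datapath ../../dacsdc_dataset --img-size 320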