if (opType != MNN::OpType_Convolution && opType != MNN::OpType_ConvolutionDepthwise &&
    opType != MNN::OpType_Eltwise) {
    continue;
}
auto tensorsPair = _opInfo.find(op->name);
if (tensorsPair == _opInfo.end()) {
    MNN_ERROR("Can't find tensors for %s\n", op->name.c_str());
}
if (opType == MNN::OpType_Eltwise) {
    auto param = op->main.AsEltwise();
    // Currently only AddInt8 (element-wise SUM) is supported
    if (param->type != MNN::EltwiseType_SUM) {
        continue;
    }
    // tensorsPair->second: .first holds the op's input tensors, .second its output tensors
    const auto& inputScale0 = _scales[tensorsPair->second.first[0]];
    const auto& inputScale1 = _scales[tensorsPair->second.first[1]];
    const auto& outputScale = _scales[tensorsPair->second.second[0]];
    const int outputScaleSize = outputScale.size();
    std::vector<float> outputInvertScale(outputScaleSize);
    Helper::invertData(outputInvertScale.data(), outputScale.data(), outputScaleSize);
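    // Added annotation (not part of the original MNN source): Helper::invertData most
    // likely fills outputInvertScale[i] with 1.0f / outputScale[i], so the quantized Add
    // can later requantize with a multiply instead of a divide, roughly:
    //   y_int8 ~= (x0_int8 * inputScale0[i] + x1_int8 * inputScale1[i]) * outputInvertScale[i]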