Skip to content
Permalink
Browse files

Automated rollback of commit e117200

PiperOrigin-RevId: 238803609
  • Loading branch information...
tensorflower-gardener committed Mar 16, 2019
1 parent 18ad215 commit 2da5a30585994cd728015d3e68275698c704ba66
Showing with 1 addition and 93 deletions.
  1. +1 −93 tensorflow/lite/kernels/fully_connected_test.cc
@@ -132,6 +132,7 @@ static float fully_connected_golden_output[] = {

class BaseFullyConnectedOpModel : public SingleOpModel {
public:
// TODO(ahentz): test different activation types too.
BaseFullyConnectedOpModel(
TfLiteRegistration* registration, int units, int batches,
const TensorData& input, const TensorData& output = {TensorType_FLOAT32},
@@ -427,99 +428,6 @@ TEST(FloatFullyConnectedOpTest, SimpleTestNoBias) {
EXPECT_THAT(m.GetOutput(), ElementsAre(10, 8));
}

// Verifies that a fused RELU6 activation clamps the fully-connected result.
// With weights {2, 4} and no bias set (presumably zero-initialized — confirm
// in BaseFullyConnectedOpModel), the pre-activation accumulations are
// 1*2 + 2*4 = 10 and 2*2 + 1*4 = 8, both of which saturate at 6.
TEST(FloatFullyConnectedOpTest, ActivationRelu6) {
  // The optimized (PIE) kernel assumes that the bias tensor is present,
  // hence bias_tensor_optional=false.
  FloatFullyConnectedOpModel model(
      ops::builtin::Register_FULLY_CONNECTED_PIE(),
      /*units=*/1, /*batches=*/2,
      /*input=*/{TensorType_FLOAT32, {2, 2}},
      /*output=*/{TensorType_FLOAT32},
      /*bias_tensor_optional=*/false,
      /*activation_func=*/ActivationFunctionType_RELU6);

  // One output unit reading two input features.
  model.SetWeights({2, 4});

  model.SetInput({
      1, 2,  // batch 0
      2, 1,  // batch 1
  });

  model.Invoke();

  // Both batches exceed the RELU6 ceiling, so both outputs clamp to 6.
  EXPECT_THAT(model.GetOutput(), ElementsAre(6, 6));
}

// Verifies that a fused TANH activation is applied to the fully-connected
// result. With weights {-2, 1}, the pre-activation accumulations are
// 1*(-2) + 4*1 = 2 and 2*(-2) + 1*1 = -3, giving tanh(2) ~= 0.964028 and
// tanh(-3) ~= -0.995055.
TEST(FloatFullyConnectedOpTest, ActivationTanh) {
  // The optimized (PIE) kernel assumes that the bias tensor is present,
  // hence bias_tensor_optional=false.
  FloatFullyConnectedOpModel model(
      ops::builtin::Register_FULLY_CONNECTED_PIE(),
      /*units=*/1, /*batches=*/2,
      /*input=*/{TensorType_FLOAT32, {2, 2}},
      /*output=*/{TensorType_FLOAT32},
      /*bias_tensor_optional=*/false,
      /*activation_func=*/ActivationFunctionType_TANH);

  // One output unit reading two input features.
  model.SetWeights({-2, 1});

  model.SetInput({
      1, 4,  // batch 0
      2, 1,  // batch 1
  });

  model.Invoke();

  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray(ArrayFloatNear({0.964028, -0.995055})));
}

// Exercises the SIGN_BIT fused activation on the generic optimized kernel.
// With weights {2, 4} and bias {1}, the accumulations are
// 1*2 + (-2)*4 + 1 = -5 and (-2)*2 + 1*4 + 1 = 1; the expected outputs are
// exactly these raw values (NOTE(review): SIGN_BIT appears to pass float
// values through unmodified here — confirm against the kernel).
TEST(FloatFullyConnectedOpTest, ActivationSign) {
  FloatFullyConnectedOpModel model(
      ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
      /*units=*/1, /*batches=*/2,
      /*input=*/{TensorType_FLOAT32, {2, 2}},
      /*output=*/{TensorType_FLOAT32},
      /*bias_tensor_optional=*/false,
      /*activation_func=*/ActivationFunctionType_SIGN_BIT);

  // One output unit reading two input features, plus a bias of 1.
  model.SetWeights({2, 4});
  model.SetBias({1});

  model.SetInput({
      1, -2,  // batch 0
      -2, 1,  // batch 1
  });

  model.Invoke();

  EXPECT_THAT(model.GetOutput(), ElementsAre(-5, 1));
}

// Verifies that a fused RELU_N1_TO_1 activation clamps the fully-connected
// result into [-1, 1]. With weights {2, 4} and bias {1}, the accumulations
// are 1*2 + (-2)*4 + 1 = -5 and (-2)*2 + 1*4 + 1 = 1, which clamp to -1
// and 1 respectively.
TEST(FloatFullyConnectedOpTest, ActivationN1) {
  FloatFullyConnectedOpModel model(
      ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
      /*units=*/1, /*batches=*/2,
      /*input=*/{TensorType_FLOAT32, {2, 2}},
      /*output=*/{TensorType_FLOAT32},
      /*bias_tensor_optional=*/false,
      /*activation_func=*/ActivationFunctionType_RELU_N1_TO_1);

  // One output unit reading two input features, plus a bias of 1.
  model.SetWeights({2, 4});
  model.SetBias({1});

  model.SetInput({
      1, -2,  // batch 0
      -2, 1,  // batch 1
  });

  model.Invoke();

  EXPECT_THAT(model.GetOutput(), ElementsAre(-1, 1));
}

TEST_P(QuantizedFullyConnectedOpTest, SimpleTestQuantizedUint8) {
QuantizedFullyConnectedOpModel m(
GetRegistration(), /*units=*/3, /*batches*/ 2,

0 comments on commit 2da5a30

Please sign in to comment.
You can’t perform that action at this time.