samples: tflite-micro: Use FVP for test validation
What is changed?
- Replaced printk with printf in the sample
Why do we need this change?
- Running the sample with the FVP prints only the output below, not the
  complete logs that we expect:
  `sender 0: Sending inference`
  We get the expected output with CONFIG_LOG_IMMEDIATE=y, but that is not
  ideal: printf works without the extra config and is a better fit for this
  sample than printk.
This change is validated using `mps3/corstone300/fvp`.
Signed-off-by: Sudan Landge <sudan.landge@arm.com>
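For context, a minimal sketch (not part of the sample; the message text is illustrative only) contrasting the two options discussed above:

```c
/*
 * Sketch only: contrasts the rejected workaround with the approach this
 * commit takes. The message text below is illustrative, not from the sample.
 */
#include <stdio.h>

int main(void)
{
	/*
	 * Rejected workaround: keep printk() and add CONFIG_LOG_IMMEDIATE=y to
	 * prj.conf so deferred log output is flushed while the FVP runs:
	 *
	 *     printk("sender 0: Sending inference\n");
	 */

	/* Approach taken here: printf() needs no extra logging configuration. */
	printf("sender 0: Sending inference\n");

	return 0;
}
```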
commit 767e03f082
parent d38e40dcdd
2 changed files with 22 additions and 22 deletions
@@ -28,7 +28,7 @@ bool copyOutput(const TfLiteTensor &src, InferenceProcess::DataPtr &dst)
 	}
 
 	if (src.bytes > dst.size) {
-		printk("Tensor size mismatch (bytes): actual=%d, expected%d.\n", src.bytes,
+		printf("Tensor size mismatch (bytes): actual=%d, expected%d.\n", src.bytes,
 		       dst.size);
 		return true;
 	}
@@ -112,7 +112,7 @@ bool InferenceProcess::runJob(InferenceJob &job)
 	/* Get model handle and verify that the version is correct */
 	const tflite::Model *model = ::tflite::GetModel(job.networkModel.data);
 	if (model->version() != TFLITE_SCHEMA_VERSION) {
-		printk("Model schema version unsupported: version=%" PRIu32 ", supported=%d.\n",
+		printf("Model schema version unsupported: version=%" PRIu32 ", supported=%d.\n",
 		       model->version(), TFLITE_SCHEMA_VERSION);
 		return true;
 	}
@@ -126,12 +126,12 @@ bool InferenceProcess::runJob(InferenceJob &job)
 	/* Allocate tensors */
 	TfLiteStatus allocate_status = interpreter.AllocateTensors();
 	if (allocate_status != kTfLiteOk) {
-		printk("Failed to allocate tensors for inference. job=%p\n", &job);
+		printf("Failed to allocate tensors for inference. job=%p\n", &job);
 		return true;
 	}
 
 	if (job.input.size() != interpreter.inputs_size()) {
-		printk("Number of job and network inputs do not match. input=%zu, network=%zu\n",
+		printf("Number of job and network inputs do not match. input=%zu, network=%zu\n",
 		       job.input.size(), interpreter.inputs_size());
 		return true;
 	}
@@ -142,7 +142,7 @@ bool InferenceProcess::runJob(InferenceJob &job)
 		const TfLiteTensor *tensor = interpreter.input(i);
 
 		if (input.size != tensor->bytes) {
-			printk("Input tensor size mismatch. index=%zu, input=%zu, network=%u\n", i,
+			printf("Input tensor size mismatch. index=%zu, input=%zu, network=%u\n", i,
 			       input.size, tensor->bytes);
 			return true;
 		}
@@ -154,14 +154,14 @@ bool InferenceProcess::runJob(InferenceJob &job)
 	/* Run the inference */
 	TfLiteStatus invoke_status = interpreter.Invoke();
 	if (invoke_status != kTfLiteOk) {
-		printk("Invoke failed for inference. job=%s\n", job.name.c_str());
+		printf("Invoke failed for inference. job=%s\n", job.name.c_str());
 		return true;
 	}
 
 	/* Copy output data */
 	if (job.output.size() > 0) {
 		if (interpreter.outputs_size() != job.output.size()) {
-			printk("Number of job and network outputs do not match. job=%zu, network=%u\n",
+			printf("Number of job and network outputs do not match. job=%zu, network=%u\n",
 			       job.output.size(), interpreter.outputs_size());
 			return true;
 		}
@@ -175,7 +175,7 @@ bool InferenceProcess::runJob(InferenceJob &job)
 
 	if (job.expectedOutput.size() > 0) {
 		if (job.expectedOutput.size() != interpreter.outputs_size()) {
-			printk("Number of job and network expected outputs do not match. job=%zu, network=%zu\n",
+			printf("Number of job and network expected outputs do not match. job=%zu, network=%zu\n",
 			       job.expectedOutput.size(), interpreter.outputs_size());
 			return true;
 		}
@@ -185,7 +185,7 @@ bool InferenceProcess::runJob(InferenceJob &job)
 			const TfLiteTensor *output = interpreter.output(i);
 
 			if (expected.size != output->bytes) {
-				printk("Expected output tensor size mismatch. index=%u, expected=%zu, network=%zu\n",
+				printf("Expected output tensor size mismatch. index=%u, expected=%zu, network=%zu\n",
 				       i, expected.size, output->bytes);
 				return true;
 			}
@@ -193,7 +193,7 @@ bool InferenceProcess::runJob(InferenceJob &job)
 			for (unsigned int j = 0; j < output->bytes; ++j) {
 				if (output->data.uint8[j] !=
 				    static_cast<uint8_t *>(expected.data)[j]) {
-					printk("Expected output tensor data mismatch. index=%u, offset=%u, expected=%02x, network=%02x\n",
+					printf("Expected output tensor data mismatch. index=%u, offset=%u, expected=%02x, network=%02x\n",
 					       i, j, static_cast<uint8_t *>(expected.data)[j],
 					       output->data.uint8[j]);
 					return true;

@@ -108,7 +108,7 @@ void *allocateHeap(const size_t size)
 	uint8_t *buf = static_cast<uint8_t *>(k_malloc(size));
 
 	if ((buf == nullptr) || (heap == nullptr)) {
-		printk("Heap allocation failed. heap=%p, buf=%p, size=%zu\n", heap, buf, size);
+		printf("Heap allocation failed. heap=%p, buf=%p, size=%zu\n", heap, buf, size);
 		exit(1);
 	}
 
@@ -133,17 +133,17 @@ void inferenceProcessTask(void *_name, void *heap, void *_params)
 		xInferenceJob *job =
 			static_cast<xInferenceJob *>(k_queue_get(params->queueHandle, Z_FOREVER));
 
-		printk("%s: Received inference job. job=%p\n", name->c_str(), job);
+		printf("%s: Received inference job. job=%p\n", name->c_str(), job);
 
 		/* Run inference */
 		job->status = inferenceProcess.runJob(*job);
 
-		printk("%s: Sending inference response. job=%p\n", name->c_str(), job);
+		printf("%s: Sending inference response. job=%p\n", name->c_str(), job);
 
 		/* Return inference message */
 		int ret = k_queue_alloc_append(job->responseQueue, job);
 		if (0 != ret) {
-			printk("%s: Failed to send message\n", name->c_str());
+			printf("%s: Failed to send message\n", name->c_str());
 			exit(1);
 		}
 	}
@@ -177,13 +177,13 @@ void inferenceSenderTask(void *_name, void *heap, void *_queue)
 				  { DataPtr(expectedOutputData, sizeof(expectedOutputData)) },
 				  &senderQueue);
 
-		printk("%s: Sending inference. job=%p, name=%s\n", name->c_str(), &job,
+		printf("%s: Sending inference. job=%p, name=%s\n", name->c_str(), &job,
 		       job.name.c_str());
 
 		/* Queue job */
 		ret = k_queue_alloc_append(inferenceQueue, &job);
 		if (0 != ret) {
-			printk("%s: Failed to send message\n", name->c_str());
+			printf("%s: Failed to send message\n", name->c_str());
 			exit(1);
 		}
 	}
@@ -193,7 +193,7 @@ void inferenceSenderTask(void *_name, void *heap, void *_queue)
 		xInferenceJob *job =
 			static_cast<xInferenceJob *>(k_queue_get(&senderQueue, Z_FOREVER));
 
-		printk("%s: Received job response. job=%p, status=%u\n", name->c_str(), job,
+		printf("%s: Received job response. job=%p, status=%u\n", name->c_str(), job,
 		       job->status);
 
 		totalCompletedJobs++;
@@ -229,7 +229,7 @@ int main()
 		const size_t stackSize = 2048;
 		k_thread_stack_t *stack = static_cast<k_thread_stack_t *>(k_malloc(stackSize));
 		if (stack == nullptr) {
-			printk("Failed to allocate stack to 'inferenceSenderTask%i'\n", n);
+			printf("Failed to allocate stack to 'inferenceSenderTask%i'\n", n);
 			exit(1);
 		}
 
@@ -239,7 +239,7 @@ int main()
 		thread.id = k_thread_create(&thread.thread, stack, stackSize, inferenceSenderTask,
 					    name, heapPtr, &inferenceQueue, 3, 0, K_FOREVER);
 		if (thread.id == 0) {
-			printk("Failed to create 'inferenceSenderTask%i'\n", n);
+			printf("Failed to create 'inferenceSenderTask%i'\n", n);
 			exit(1);
 		}
 
@@ -252,7 +252,7 @@ int main()
 		const size_t stackSize = 8192;
 		k_thread_stack_t *stack = static_cast<k_thread_stack_t *>(k_malloc(stackSize));
 		if (stack == nullptr) {
-			printk("Failed to allocate stack to 'inferenceSenderTask%i'\n", n);
+			printf("Failed to allocate stack to 'inferenceSenderTask%i'\n", n);
 			exit(1);
 		}
 
@@ -265,7 +265,7 @@ int main()
 		thread.id = k_thread_create(&thread.thread, stack, stackSize, inferenceProcessTask,
 					    name, heapPtr, &taskParam, 2, 0, K_FOREVER);
 		if (thread.id == 0) {
-			printk("Failed to create 'inferenceProcessTask%i'\n", n);
+			printf("Failed to create 'inferenceProcessTask%i'\n", n);
 			exit(1);
 		}
 
@@ -283,7 +283,7 @@ int main()
 	/* Safety belt */
 	k_thread_suspend(k_current_get());
 
-	printk("Zephyr application failed to initialise \n");
+	printf("Zephyr application failed to initialise \n");
 
 	return 1;
 }