I have been trying to get the Google Coral USB Accelerator running under Windows 10 for days.
Installing UsbDk
Installing Windows drivers
Microsoft-PnP-Hilfsprogramm
Treiberpaket wird hinzugefügt: coral.inf
Das Treiberpaket wurde erfolgreich hinzugefügt. (Ist bereits im System vorhanden)
Veröffentlichter Name: oem75.inf
Treiberpaket wird hinzugefügt: Coral_USB_Accelerator.inf
Das Treiberpaket wurde erfolgreich hinzugefügt. (Ist bereits im System vorhanden)
Veröffentlichter Name: oem76.inf
Treiberpaket wird hinzugefügt: Coral_USB_Accelerator_(DFU).inf
Das Treiberpaket wurde erfolgreich hinzugefügt. (Ist bereits im System vorhanden)
Veröffentlichter Name: oem77.inf
Treiberpaket auf dem Gerät installiert: USB\VID_1A6E&PID_089A\5&32865703&0&17
Treiberpaket auf dem Gerät installiert: USB\VID_1A6E&PID_089A\5&32865703&0&18
Das Treiberpaket auf dem Gerät ist auf dem neuesten Stand: USB\VID_1A6E&PID_089A\5&32865703&0&19
Treiberpakete insgesamt: 3
Hinzugefügte Treiberpakete: 3
Installing performance counters
Info: Anbieter {aaa5bf9e-c44b-4177-af65-d3a06ba45fe7}, der in C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\edgetpu_runtime\third_party\coral_accelerator_windows\coral.man definiert ist, ist bereits im Systemrepository installiert.
Info: Die Leistungsindikatoren wurden erfolgreich in C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\edgetpu_runtime\third_party\coral_accelerator_windows\coral.man installiert.
Copying edgetpu and libusb to System32
1 Datei(en) kopiert.
1 Datei(en) kopiert.
Install complete
Drücken Sie eine beliebige Taste . . .
Result:
$ python examples/classify_image.py --model test_data/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite --labels test_data/inat_bird_labels.txt --input test_data/parrot.jpg
Traceback (most recent call last):
File "C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\lib\site-packages\tflite_runtime\interpreter.py", line 160, in load_delegate
delegate = Delegate(library, options)
File "C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\lib\site-packages\tflite_runtime\interpreter.py", line 119, in __init__
raise ValueError(capture.message)
ValueError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "examples/classify_image.py", line 121, in <module>
main()
File "examples/classify_image.py", line 71, in main
interpreter = make_interpreter(*args.model.split('@'))
File "C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\lib\site-packages\pycoral\utils\edgetpu.py", line 87, in make_interpreter
delegates = [load_edgetpu_delegate({'device': device} if device else {})]
File "C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\lib\site-packages\pycoral\utils\edgetpu.py", line 52, in load_edgetpu_delegate
return tflite.load_delegate(_EDGETPU_SHARED_LIB, options or {})
File "C:\Users\anja-\Anja_Programme\AnjaCoral\envCoralPy8\lib\site-packages\tflite_runtime\interpreter.py", line 163, in load_delegate
library, str(e)))
ValueError: Failed to load delegate from edgetpu.dll
(envCoralPy8)
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for using the TensorFlow Lite Interpreter with Edge TPU."""
import contextlib
import ctypes
import ctypes.util
import numpy as np
# pylint:disable=unused-import
from pycoral.pybind._pywrap_coral import GetRuntimeVersion as get_runtime_version
from pycoral.pybind._pywrap_coral import InvokeWithBytes as invoke_with_bytes
from pycoral.pybind._pywrap_coral import InvokeWithDmaBuffer as invoke_with_dmabuffer
from pycoral.pybind._pywrap_coral import InvokeWithMemBuffer as invoke_with_membuffer
from pycoral.pybind._pywrap_coral import ListEdgeTpus as list_edge_tpus
from pycoral.pybind._pywrap_coral import SetVerbosity as set_verbosity
from pycoral.pybind._pywrap_coral import SupportsDmabuf as supports_dmabuf
import platform
import tflite_runtime.interpreter as tflite
# Platform-specific filename of the Edge TPU runtime shared library.
# Indexing the dict with platform.system() deliberately raises KeyError at
# import time on any OS other than Linux, macOS, or Windows.
_EDGETPU_SHARED_LIB = {
    'Linux': 'libedgetpu.so.1',
    'Darwin': 'libedgetpu.1.dylib',
    'Windows': 'edgetpu.dll'
}[platform.system()]
def load_edgetpu_delegate(options=None):
  """Loads and returns the Edge TPU delegate.

  Args:
    options (dict): Options forwarded to the delegate through
      ``tf.lite.load_delegate``. The only option you should use is
      "device", which selects the Edge TPU to use; it accepts the same
      values as the ``device`` argument of :func:`make_interpreter`.

  Returns:
    The Edge TPU delegate object.
  """
  # An absent/falsy options value becomes an empty options dict.
  delegate_options = options if options else {}
  return tflite.load_delegate(_EDGETPU_SHARED_LIB, delegate_options)
def make_interpreter(model_path_or_content, device=None, delegate=None):
  """Creates a new ``tf.lite.Interpreter`` instance using the given model.

  **Note:** If you have multiple Edge TPUs, you should always specify the
  ``device`` argument.

  Args:
    model_path_or_content (str or bytes): A `str` is treated as a model file
      path; a `bytes` object is treated as the model content itself.
    device (str): The Edge TPU device you want:

      + None -- use any Edge TPU (this is the default)
      + ":<N>" -- use N-th Edge TPU (this corresponds to the enumerated
        index position from :func:`list_edge_tpus`)
      + "usb" -- use any USB Edge TPU
      + "usb:<N>" -- use N-th USB Edge TPU
      + "pci" -- use any PCIe Edge TPU
      + "pci:<N>" -- use N-th PCIe Edge TPU

      If left as None, you cannot reliably predict which device you'll get.
      So if you have multiple Edge TPUs and want to run a specific model on
      each one, then you must specify the device.
    delegate: A pre-loaded Edge TPU delegate object, as provided by
      :func:`load_edgetpu_delegate`. If provided, the `device` argument
      is ignored.

  Returns:
    New ``tf.lite.Interpreter`` instance.
  """
  if delegate:
    experimental_delegates = [delegate]
  else:
    delegate_options = {'device': device} if device else {}
    experimental_delegates = [load_edgetpu_delegate(delegate_options)]

  # bytes means in-memory model content; anything else is a file path.
  if isinstance(model_path_or_content, bytes):
    return tflite.Interpreter(
        model_content=model_path_or_content,
        experimental_delegates=experimental_delegates)
  return tflite.Interpreter(
      model_path=model_path_or_content,
      experimental_delegates=experimental_delegates)
# ctypes definition of GstMapInfo. This is a stable API, guaranteed to be
# ABI compatible for any past and future GStreamer 1.0 releases.
# Used to get the underlying memory pointer without any copies, and without
# native library linking against libgstreamer.
class _GstMapInfo(ctypes.Structure):
  # Field order and types mirror the C struct layout exactly; do NOT reorder
  # or retype these entries — ctypes relies on them for binary compatibility.
  _fields_ = [
      ('memory', ctypes.c_void_p),  # GstMemory *memory
      ('flags', ctypes.c_int),  # GstMapFlags flags
      ('data', ctypes.c_void_p),  # guint8 *data
      ('size', ctypes.c_size_t),  # gsize size
      ('maxsize', ctypes.c_size_t),  # gsize maxsize
      ('user_data', ctypes.c_void_p * 4),  # gpointer user_data[4]
      ('_gst_reserved', ctypes.c_void_p * 4)  # reserved padding
  ]  # GST_PADDING
# Try to import GStreamer but don't fail if it's not available. If not available
# we're probably not getting GStreamer buffers as input anyway.
# _libgst stays None when the import fails; callers use it as a feature flag.
_libgst = None
try:
  # pylint:disable=g-import-not-at-top
  import gi
  gi.require_version('Gst', '1.0')
  gi.require_version('GstAllocators', '1.0')
  # pylint:disable=g-multiple-import
  from gi.repository import Gst, GstAllocators
  # Load libgstreamer directly via ctypes so we can call gst_buffer_map /
  # gst_buffer_unmap without compiling against GStreamer headers.
  _libgst = ctypes.CDLL(ctypes.util.find_library('gstreamer-1.0'))
  _libgst.gst_buffer_map.argtypes = [
      ctypes.c_void_p,
      ctypes.POINTER(_GstMapInfo), ctypes.c_int
  ]
  _libgst.gst_buffer_map.restype = ctypes.c_int
  _libgst.gst_buffer_unmap.argtypes = [
      ctypes.c_void_p, ctypes.POINTER(_GstMapInfo)
  ]
  _libgst.gst_buffer_unmap.restype = None
except (ImportError, ValueError, OSError):
  # ImportError: gi not installed; ValueError: requested GStreamer version
  # unavailable; OSError: the shared library could not be loaded. In all
  # cases we silently continue without GStreamer buffer support.
  pass
def _is_valid_ctypes_input(input_data):
if not isinstance(input_data, tuple):
return False
pointer, size = input_data
if not isinstance(pointer, ctypes.c_void_p):
return False
return isinstance(size, int)
@contextlib.contextmanager
def _gst_buffer_map(buffer):
  """Maps a Gst.Buffer for reading; yields (ctypes.c_void_p, size)."""
  info = _GstMapInfo()
  # NOTE(review): hash() on the PyGObject wrapper is assumed to return the
  # underlying GstBuffer* address, which is what gst_buffer_map expects.
  raw_ptr = hash(buffer)
  if not _libgst.gst_buffer_map(raw_ptr, info, Gst.MapFlags.READ):
    raise RuntimeError('gst_buffer_map failed')
  try:
    yield ctypes.c_void_p(info.data), info.size
  finally:
    # Always unmap, even if the caller raised while using the mapping.
    _libgst.gst_buffer_unmap(raw_ptr, info)
def _check_input_size(input_size, expected_input_size):
if input_size < expected_input_size:
raise ValueError('input size={}, expected={}.'.format(
input_size, expected_input_size))
def run_inference(interpreter, input_data):
  """Performs interpreter ``invoke()`` with a raw input tensor.

  Args:
    interpreter: The ``tf.lite.Interpreter`` to invoke.
    input_data: A 1-D array as the input tensor. Input data must be uint8
      format. Data may be ``Gst.Buffer`` or :obj:`numpy.ndarray`.

  Raises:
    ValueError: If the input is smaller than the model's expected input size.
    TypeError: If the input is none of the supported kinds (bytes,
      (ctypes.c_void_p, int) tuple, Gst.Buffer, numpy.ndarray).
  """
  # Expected element count: product of the first input tensor's shape.
  input_shape = interpreter.get_input_details()[0]['shape']
  expected_input_size = np.prod(input_shape)

  # Native handle lets the pybind invoke_with_* helpers feed the tensor
  # without going through the Python-level tensor API.
  interpreter_handle = interpreter._native_handle()  # pylint:disable=protected-access

  if isinstance(input_data, bytes):
    _check_input_size(len(input_data), expected_input_size)
    invoke_with_bytes(interpreter_handle, input_data)
  elif _is_valid_ctypes_input(input_data):
    # (ctypes.c_void_p, size) pair: pass the raw pointer value through.
    pointer, actual_size = input_data
    _check_input_size(actual_size, expected_input_size)
    invoke_with_membuffer(interpreter_handle, pointer.value,
                          expected_input_size)
  elif _libgst and isinstance(input_data, Gst.Buffer):
    memory = input_data.peek_memory(0)
    # Prefer the zero-copy dma-buf path when the memory is dmabuf-backed and
    # the runtime supports it; otherwise fall back to mapping the buffer.
    map_buffer = not GstAllocators.is_dmabuf_memory(
        memory) or not supports_dmabuf(interpreter_handle)
    if not map_buffer:
      _check_input_size(memory.size, expected_input_size)
      fd = GstAllocators.dmabuf_memory_get_fd(memory)
      try:
        invoke_with_dmabuffer(interpreter_handle, fd, expected_input_size)
      except RuntimeError:
        # dma-buf input didn't work, likely due to old kernel driver. This
        # situation can't be detected until one inference has been tried.
        map_buffer = True
    if map_buffer:
      # CPU-mapped fallback path.
      with _gst_buffer_map(input_data) as (pointer, actual_size):
        assert actual_size >= expected_input_size
        invoke_with_membuffer(interpreter_handle, pointer.value,
                              expected_input_size)
  elif isinstance(input_data, np.ndarray):
    _check_input_size(len(input_data), expected_input_size)
    invoke_with_membuffer(interpreter_handle, input_data.ctypes.data,
                          expected_input_size)
  else:
    raise TypeError('input data type is not supported.')