datadog / dd-trace-py
Datadog Python APM Client
Home Page: https://ddtrace.readthedocs.io/
License: Other
I am trying to run gunicorn with ddtrace-run, but I keep hitting the same error no matter where or how I execute the command:
Traceback (most recent call last):
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/arbiter.py", line 515, in spawn_worker
worker.init_process()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/workers/base.py", line 122, in init_process
self.load_wsgi()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/workers/base.py", line 130, in load_wsgi
self.wsgi = self.app.wsgi()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/app/base.py", line 67, in wsgi
self.callable = self.load()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/app/wsgiapp.py", line 65, in load
return self.load_wsgiapp()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/app/wsgiapp.py", line 52, in load_wsgiapp
return util.import_app(self.app_uri)
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/gunicorn/util.py", line 357, in import_app
__import__(module)
File "/home/webapps/minecraftmarket/minecraftmarket/wsgi.py", line 24, in <module>
application = get_wsgi_application()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/django/core/wsgi.py", line 13, in get_wsgi_application
django.setup(set_prefix=False)
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/ddtrace/contrib/django/patch.py", line 18, in traced_setup
if 'ddtrace.contrib.django' not in settings.INSTALLED_APPS:
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/django/conf/__init__.py", line 56, in __getattr__
self._setup(name)
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/django/conf/__init__.py", line 41, in _setup
self._wrapped = Settings(settings_module)
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/django/conf/__init__.py", line 110, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "/virtualenv/minecraftmarket/lib/python3.5/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/home/webapps/minecraftmarket/minecraftmarket/settings/__init__.py", line 5, in <module>
from .celery import app as celery_app
File "/home/webapps/minecraftmarket/minecraftmarket/settings/celery.py", line 24, in <module>
django.setup()
File "/virtualenv/minecraftmarket/lib/python3.5/site-packages/ddtrace/contrib/django/patch.py", line 34, in traced_setup
if 'ddtrace.contrib.django.TraceMiddleware' not in settings.MIDDLEWARE:
TypeError: argument of type 'NoneType' is not iterable
[2017-07-14 14:15:10 +0000] [10132] [INFO] Worker exiting (pid: 10132)
[2017-07-14 10:15:11 -0400] [10126] [INFO] Shutting down: Master
[2017-07-14 10:15:11 -0400] [10126] [INFO] Reason: Worker failed to boot.
(minecraftmarket)webapps@srv1:~/minecraftmarket$
I can confirm the middleware is present, along with the installed app, and the application successfully sends APM data for everything I have patched so far within the application without fault.
So I'm unsure where to start here.
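For what it's worth, the TypeError points at settings.MIDDLEWARE being None when traced_setup runs (Django defaults MIDDLEWARE to None when a project only defines MIDDLEWARE_CLASSES, and the traceback shows a reentrant django.setup() from celery.py firing while the settings module is still importing). A minimal sketch of a None-tolerant guard, assuming that's the root cause:
from django.conf import settings

# a None-tolerant version of the check, assuming MIDDLEWARE may not be set yet
middleware = getattr(settings, 'MIDDLEWARE', None) or ()
if 'ddtrace.contrib.django.TraceMiddleware' not in middleware:
    pass  # insert the middleware here, or fall back to MIDDLEWARE_CLASSES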
Ostensibly introduced in 0.4.0, the requests package is still not auto-patched.
Is there a timeline or update on when the requests package will be patched when calling patch_all() without arguments?
In https://circleci.com/gh/DataDog/dd-trace-py/197, Postgres failed to start. This meant that the until nc -v -z localhost 9042 ; do sleep 0.2 ; done check ran indefinitely, until the job was cancelled.
I'm not sure why the CircleCI test didn't fail entirely when Postgres failed to start; the docs here suggest that failing commands should cause the whole build to fail, but this didn't happen.
Queries that are run using Session.execute_async do not generate spans.
I'm in touch with @palazzem about the issue on Slack.
Minimal reproduction, assuming you have a keyspace named test with a table called a, with the appropriate fields:
from cassandra import cluster  # missing from the original snippet

cluster = cluster.Cluster()
keyspace = cluster.connect('test')
statement = keyspace.prepare('SELECT a, b, c FROM test WHERE a = ?')

def do_some_stuff():
    futures = []
    for i in xrange(100):
        futures.append(keyspace.execute_async(statement, {'a': 1}))
    return [
        c.result()[0]
        for c in futures
    ]
CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};
CREATE TABLE IF NOT EXISTS test.test (
a bigint,
b bigint,
c bigint,
PRIMARY KEY (a)
);
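For reference, a rough sketch of how execute_async could be traced by finishing the span from the ResponseFuture callbacks; this illustrates the idea and is not the integration's actual code:
from ddtrace import tracer

def traced_execute_async(session, *args, **kwargs):
    # open a span now, finish it when the driver reports a result or error
    span = tracer.trace('cassandra.query', service='cassandra')
    future = session.execute_async(*args, **kwargs)

    def _on_result(_rows):
        span.finish()

    def _on_error(_exc):
        span.error = 1
        span.finish()

    future.add_callbacks(_on_result, _on_error)
    return future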
It might be nice to be able to assign a reporting level to traces, like a log level, and then set a global "trace level".
My reasoning here is that there are some cases in development/testing where I want to trace many more things to be able to debug a specific problem or performance issue. However, these traces are just too noisy to leave enabled in production.
Not sure if there is already a way to handle such a case? Unless I create a separate Tracer instance and just enable/disable that one as needed?
from ddtrace import tracer

# report all traces of level 'debug' or higher
tracer.set_level('debug')

# Trace this function only when level is 'debug'
@tracer.wrap(level='debug')
def child():
    pass

# Always trace this function
@tracer.wrap()
def parent():
    child()

with tracer.trace('main', service='main'):
    parent()
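Absent built-in support, a minimal userland sketch of the same idea, gating "debug"-level spans behind a flag (the flag and wrapper names here are made up):
from ddtrace import tracer

TRACE_DEBUG = False  # flip on in development

def wrap_debug(*trace_args, **trace_kwargs):
    # apply tracer.wrap() only when debug tracing is enabled,
    # otherwise leave the function untouched
    def decorator(f):
        if TRACE_DEBUG:
            return tracer.wrap(*trace_args, **trace_kwargs)(f)
        return f
    return decorator

@wrap_debug('child')
def child():
    pass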
See behavior here:
>>> import ddtrace
>>> ddtrace.patch_all()
>>> import psycopg2
>>> conn = psycopg2.connect('postgresql://localhost')
>>> print(type(conn.cursor()))
<class 'ddtrace.contrib.dbapi.TracedCursor'>
>>> with conn.cursor() as cur:
... print(type(cur))
<type 'psycopg2.extensions.cursor'>
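The likely culprit is that TracedCursor doesn't define its own context-manager protocol, so the with statement falls through to the wrapped psycopg2 cursor's __enter__, which returns the raw cursor. A sketch of a possible fix, assuming the wrapper keeps the raw cursor as self.cursor:
class TracedCursor(object):
    # ...existing tracing logic...

    def __enter__(self):
        self.cursor.__enter__()  # enter the underlying cursor
        return self              # but hand back the traced wrapper

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self.cursor.__exit__(exc_type, exc_val, exc_tb)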
While browsing the code I noticed https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/contrib/asyncio/provider.py#L34, where the provider associates a context with the current task. Note that when doing things like asyncio.gather, new tasks are created, so you won't be able to retrieve the context for the sub-tasks. In our product I wrote a monkeypatch for the task constructor to ensure that sub-tasks get associated with their "parent" tasks.
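A sketch of that monkeypatch idea using a task factory; the attribute name the provider uses to store the context on the task is an assumption here:
import asyncio

CONTEXT_ATTR = '_datadog_context'  # assumed attribute name; check the provider

def context_inheriting_task_factory(loop, coro):
    # create the task as usual, then copy the tracing context
    # from the parent task so sub-tasks share the same trace
    task = asyncio.Task(coro, loop=loop)
    parent = asyncio.Task.current_task(loop=loop)
    ctx = getattr(parent, CONTEXT_ATTR, None) if parent else None
    if ctx is not None:
        setattr(task, CONTEXT_ATTR, ctx)
    return task

loop = asyncio.get_event_loop()
loop.set_task_factory(context_inheriting_task_factory)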
Hi friends,
I've been trying to do some heavy profiling of a python program and have been trying to hunt down a 15 millisecond lag at the beginning of my processes.
The tracer shows nothing.
In an act of desperation, I moved over to New Relic, which shows the offender is the psycopg2 connect function. This is monkey-patched by the tracer, and I suspect the tracer itself is the cause of the slowdown (although I am unsure, and am removing it now to verify).
Thanks!
I ran into a small issue where the Flask integration is unable to set tags when they contain unicode characters.
The specific case we have is when unicode was in the url.
2016-10-28 13:47:58,953 - ERROR - error finishing trace
Traceback (most recent call last):
File "/home/vagrant/.virtualenvs/kennel/local/lib/python2.7/site-packages/ddtrace/contrib/flask/middleware.py", line 150, in _request_finished
self._finish_span(response=response)
File "/home/vagrant/.virtualenvs/kennel/local/lib/python2.7/site-packages/ddtrace/contrib/flask/middleware.py", line 119, in _finish_span
span.set_tag(http.URL, str(request.base_url or ""))
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe8' in position 31: ordinal not in range(128)
Happy to submit a PR for a fix; I'm not sure how you would like to go about it, but my thinking was:
from ...compat import stringify
# Use `stringify` instead of `str` throughout the module
span.set_tag(http.URL, stringify(request.base_url or ""))
The redis integration tracks NoScriptError as an exception when calling a registered script. This exception is expected when registering/calling a script for the first time, and shouldn't be logged or treated as an error.
https://github.com/andymccurdy/redis-py/blob/master/redis/client.py#L2922
The integration needs to handle this case.
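For illustration, a sketch of the idea inside a traced command wrapper (the wrapper name is made up): redis-py catches NoScriptError itself and transparently re-registers the script, so the span shouldn't be flagged:
from redis.exceptions import NoScriptError

def traced_execute(func, span, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except NoScriptError:
        raise  # expected on the first EVALSHA of an unloaded script
    except Exception:
        span.error = 1
        raise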
<BoundStatement query="SELECT * FROM my_table WHERE id = ? LIMIT ?", values=[12356L], consistency=LOCAL_ONE>
cc @jhgg
From a quick glance, the only problem is the dependency on django.apps, which should be simple to work around.
I was going to work through a PR for this.
When using the monkey-patched gevent integration, if you attempt to create a trace on the main greenlet, the context will not be stored on the tracer, and tracer.current_span() will error.
# patch before importing gevent
from ddtrace import patch, tracer
patch(gevent=True)

# use gevent as usual with or without the monkey module
import gevent
from gevent import monkey; monkey.patch_all()

@tracer.wrap(service='web')
def my_parent_function():
    span = tracer.current_span()
    gevent.spawn(worker_function)

def worker_function():
    # then trace its child
    with tracer.trace("greenlet.call") as span:
        span.service = "greenlet"
        with tracer.trace("greenlet.child_call") as child:
            print("Worker function")

my_parent_function()
Yields the following error:
Traceback (most recent call last):
File "geventtracetest.py", line 23, in <module>
my_parent_function()
File "/usr/local/lib/python2.7/site-packages/ddtrace/tracer.py", line 369, in func_wrapper
return f(*args, **kwargs)
File "geventtracetest.py", line 11, in my_parent_function
span = tracer.current_span()
File "/usr/local/lib/python2.7/site-packages/ddtrace/tracer.py", line 234, in current_span
return self.get_call_context().get_current_span()
AttributeError: 'NoneType' object has no attribute 'get_current_span'
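A workaround sketch: run the top-level function inside a spawned greenlet, so the gevent context provider has a TracedGreenlet to attach the context to (my_parent_function is from the snippet above):
import gevent

g = gevent.spawn(my_parent_function)
g.join()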
Tracing is initialized as
trace_engine(db.engine, tracer, service='abc')
I get this error:
File "/usr/local/lib/python3.5/dist-packages/ddtrace/contrib/sqlalchemy/engine.py", line 65, in _after_cur_exec
span = self.tracer.current_span()
File "/usr/local/lib/python3.5/dist-packages/ddtrace/tracer.py", line 220, in current_span
return self.get_call_context().get_current_span()
This particular DB hit happens outside any request, while initializing the application (it registers some class metadata in the DB).
Is there a way to specify a context, or should that part check whether get_call_context() is not None?
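A sketch of the suggested guard inside _after_cur_exec (the signature is abbreviated; attribute names follow the traceback above):
def _after_cur_exec(self, conn, cursor, statement, *args):
    ctx = self.tracer.get_call_context()
    if ctx is None:
        return  # no active context (e.g. app start-up); skip tracing
    span = ctx.get_current_span()
    # ... tag and finish the span as before ...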
I'm curious what the status of the requests monkey patch is on your roadmap. I set it to true just to check, and I'm not seeing anything in my traces. I did see some warnings in my trace agent saying that the spans for requests had no service.
ddtrace/monkey.py, line 30 in 5b684b6:
PATCH_MODULES = {
    .....
    'requests': False,  # Not ready yet
    ....
}
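In the meantime you can opt in explicitly rather than waiting on patch_all():
from ddtrace import patch

patch(requests=True)  # enable the requests integration by name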
The tracer is logging the following error when disabled:
2017-07-05 12:54:36,552:[none]:[ddtrace.writer:134]:ERROR cannot send services: [Errno 111] Connection refused
This is occurring when integrated with Django with the following configuration:
DATADOG_TRACE = {
'ENABLED': False
}
From reading the documentation, which states:
ENABLED (default: not django_settings.DEBUG): defines if the tracer is enabled or not. If set to false, the code is still instrumented but no spans are sent to the trace agent. This setting cannot be changed at runtime and a restart is required. By default the tracer is disabled when in DEBUG mode, enabled otherwise.
It seems this log should not occur. If no spans are sent to the trace agent then presumably a connection should not be established?
datadog==0.15.0
ddtrace==0.8.5
We added ddtrace-run to our py3 + Pyramid app, which caused it to crash on startup with an AttributeError (module 'ddtrace.contrib.pyramid' has no attribute 'models').
The cause turned out to be a series of config.include() calls that were using relative import paths; under ddtrace, Pyramid's path resolver was mapping .foo to ddtrace.contrib.pyramid.foo (I guess it changes what __main__ means?).
Switching to absolute paths fixed the issue.
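For illustration ('myapp' is a placeholder package name):
# relative: resolved against ddtrace.contrib.pyramid under ddtrace-run
config.include('.models')
# absolute: resolves the same everywhere
config.include('myapp.models')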
We're noticing this exception when using the latest pymongo==3.4.0 driver:
Traceback (most recent call last):
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/contrib/pymongo/client.py", line 105, in send_message_with_response
span.resource = _resource_from_cmd(cmd)
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/contrib/pymongo/client.py", line 220, in _resource_from_cmd
nq = normalize_filter(cmd.query)
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/contrib/pymongo/client.py", line 207, in normalize_filter
out[k] = normalize_filter(v)
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/contrib/pymongo/client.py", line 207, in normalize_filter
out[k] = normalize_filter(v)
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/contrib/pymongo/client.py", line 199, in normalize_filter
return [normalize_filter(s) for s in f]
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/contrib/pymongo/client.py", line 204, in normalize_filter
for k, v in iteritems(f):
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/compat.py", line 32, in iteritems
func = obj.items
AttributeError: 'long' object has no attribute 'items'
One service per domain and one resource per path is too much. It either needs to be handled with general quotas or with more configuration.
The context is not properly passed from parent greenlet to child greenlet when using gevent.pool.Group().map. Spawning greenlets directly works:
import gevent.pool  # missing from the original snippet

pool = gevent.pool.Group()

# Context passes correctly
greenlets = [pool.spawn(myfunc, arg) for arg in args]
results = [g.get() for g in greenlets]

# Context does not pass correctly
results = pool.map(myfunc, args)
After some debugging, I tracked it to this point in ddtrace. Basically, before spawning a TracedGreenlet, it asks gevent for the current greenlet, takes the context attached to that greenlet, and attaches it to the new greenlet. However, when using pool.map, the "current" greenlet is not a greenlet; it's an instance of IMap, which has not inherited the context.
WARNING:ddtrace.span:error setting tag cassandra.query, ignoring it
Traceback (most recent call last):
File "/home/deploy/virtualenvs/discord/local/lib/python2.7/site-packages/ddtrace/span.py", line 126, in set_tag
self.meta[key] = stringify(value)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xdd in position 4: ordinal not in range(128)
Hi,
I just got into the APM beta and have been trying it out with no luck. I have both the dd-agent and the dd-trace-agent installed and seemingly working just fine. /var/log/datadog/trace-agent.log looks like:
2016-12-29 23:35:48 INFO (receiver.go:86) - listening for traces at http://localhost:7777/
2016-12-29 23:36:48 INFO (receiver.go:288) - receiver handled 0 spans, dropped 0 ; handled 0 traces, dropped 0
2016-12-29 23:37:48 INFO (receiver.go:288) - receiver handled 0 spans, dropped 0 ; handled 0 traces, dropped 0
2016-12-29 23:38:48 INFO (receiver.go:288) - receiver handled 0 spans, dropped 0 ; handled 0 traces, dropped 0
2016-12-29 23:39:48 INFO (receiver.go:288) - receiver handled 0 spans, dropped 0 ; handled 0 traces, dropped 0
...
Unfortunately, in my app log, I get this every little while:
ERROR:ddtrace.writer:error sending spans
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/ddtrace/writer.py", line 120, in _target
self.api.send_traces(traces)
File "/usr/local/lib/python2.7/site-packages/ddtrace/api.py", line 47, in send_traces
response = self._put(self._traces, data)
File "/usr/local/lib/python2.7/site-packages/ddtrace/api.py", line 78, in _put
conn.request("PUT", endpoint, data, self._headers)
File "/usr/local/lib/python2.7/httplib.py", line 1057, in request
self._send_request(method, url, body, headers)
File "/usr/local/lib/python2.7/httplib.py", line 1097, in _send_request
self.endheaders(body)
File "/usr/local/lib/python2.7/httplib.py", line 1053, in endheaders
self._send_output(message_body)
File "/usr/local/lib/python2.7/httplib.py", line 897, in _send_output
self.send(msg)
File "/usr/local/lib/python2.7/httplib.py", line 859, in send
self.connect()
File "/usr/local/lib/python2.7/httplib.py", line 836, in connect
self.timeout, self.source_address)
File "/usr/local/lib/python2.7/socket.py", line 575, in create_connection
raise err
error: [Errno 111] Connection refused
In my app, I'm just doing:
from ddtrace import patch_all
patch_all()
What's probably relevant here is my app is being run in a Docker container whereas my agent is installed on the host. This works fine for me on NewRelic's agent, but it looks like it might be an issue for DataDog's?
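If the agent is on the host and the app is in a container, localhost inside the container won't reach it. A sketch of pointing the tracer at the host instead; the bridge IP here is illustrative, not prescriptive:
from ddtrace import tracer, patch_all

# docker0 bridge address of the host; the trace agent above listens on 7777
tracer.configure(hostname='172.17.0.1', port=7777)
patch_all()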
vagrant@ubuntu-1404:~/go/src/github.com/DataDog/dd-trace-py [18:35:18][gabin/lang2] $ rake test
docker-compose up -d | cat
ddtracepy_redis_1 is up-to-date
ddtracepy_cassandra_1 is up-to-date
ddtracepy_mongo_1 is up-to-date
ddtracepy_elasticsearch_1 is up-to-date
ddtracepy_memcached_1 is up-to-date
ddtracepy_mysql_1 is up-to-date
ddtracepy_ddagent_1 is up-to-date
ddtracepy_postgres_1 is up-to-date
tox
GLOB sdist-make: /home/vagrant/go/src/github.com/DataDog/dd-trace-py/setup.py
ERROR: invocation failed (exit code 1), logfile: /home/vagrant/go/src/github.com/DataDog/dd-trace-py/.tox/log/tox-0.log
ERROR: actionid: tox
msg: packaging
cmdargs: ['/usr/bin/python', local('/home/vagrant/go/src/github.com/DataDog/dd-trace-py/setup.py'), 'sdist', '--formats=zip', '--dist-dir', local('/home/vagrant/go/src/github.com/DataDog/dd-trace-py/.tox/dist')]
env: None
running sdist
running egg_info
writing requirements to ddtrace.egg-info/requires.txt
writing ddtrace.egg-info/PKG-INFO
writing top-level names to ddtrace.egg-info/top_level.txt
writing dependency_links to ddtrace.egg-info/dependency_links.txt
writing entry points to ddtrace.egg-info/entry_points.txt
reading manifest file 'ddtrace.egg-info/SOURCES.txt'
writing manifest file 'ddtrace.egg-info/SOURCES.txt'
running check
creating ddtrace-0.8.5
creating ddtrace-0.8.5/ddtrace
creating ddtrace-0.8.5/ddtrace.egg-info
creating ddtrace-0.8.5/ddtrace/bootstrap
creating ddtrace-0.8.5/ddtrace/commands
creating ddtrace-0.8.5/ddtrace/contrib
creating ddtrace-0.8.5/ddtrace/contrib/aiobotocore
creating ddtrace-0.8.5/ddtrace/contrib/aiohttp
creating ddtrace-0.8.5/ddtrace/contrib/asyncio
creating ddtrace-0.8.5/ddtrace/contrib/boto
creating ddtrace-0.8.5/ddtrace/contrib/botocore
creating ddtrace-0.8.5/ddtrace/contrib/bottle
creating ddtrace-0.8.5/ddtrace/contrib/cassandra
creating ddtrace-0.8.5/ddtrace/contrib/celery
creating ddtrace-0.8.5/ddtrace/contrib/dbapi
creating ddtrace-0.8.5/ddtrace/contrib/django
creating ddtrace-0.8.5/ddtrace/contrib/elasticsearch
creating ddtrace-0.8.5/ddtrace/contrib/falcon
creating ddtrace-0.8.5/ddtrace/contrib/flask
creating ddtrace-0.8.5/ddtrace/contrib/flask_cache
creating ddtrace-0.8.5/ddtrace/contrib/gevent
creating ddtrace-0.8.5/ddtrace/contrib/httplib
creating ddtrace-0.8.5/ddtrace/contrib/mongoengine
creating ddtrace-0.8.5/ddtrace/contrib/mysql
creating ddtrace-0.8.5/ddtrace/contrib/psycopg
creating ddtrace-0.8.5/ddtrace/contrib/pylibmc
creating ddtrace-0.8.5/ddtrace/contrib/pylons
creating ddtrace-0.8.5/ddtrace/contrib/pymongo
creating ddtrace-0.8.5/ddtrace/contrib/pyramid
creating ddtrace-0.8.5/ddtrace/contrib/redis
creating ddtrace-0.8.5/ddtrace/contrib/requests
creating ddtrace-0.8.5/ddtrace/contrib/sqlalchemy
creating ddtrace-0.8.5/ddtrace/contrib/sqlite3
creating ddtrace-0.8.5/ddtrace/contrib/tornado
creating ddtrace-0.8.5/ddtrace/ext
making hard links in ddtrace-0.8.5...
hard linking README.rst -> ddtrace-0.8.5
hard linking setup.cfg -> ddtrace-0.8.5
hard linking setup.py -> ddtrace-0.8.5
hard linking ddtrace/__init__.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/api.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/compat.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/compat_async.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/context.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/encoding.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/monkey.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/pin.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/provider.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/sampler.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/span.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/tracer.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/util.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace/writer.py -> ddtrace-0.8.5/ddtrace
hard linking ddtrace.egg-info/PKG-INFO -> ddtrace-0.8.5/ddtrace.egg-info
error: [Errno 13] Permission denied
ERROR: FAIL could not package project - v = InvocationError('/usr/bin/python /home/vagrant/go/src/github.com/DataDog/dd-trace-py/setup.py sdist --formats=zip --dist-dir /home/vagrant/go/src/github.com/DataDog/dd-trace-py/.tox/dist (see /home/vagrant/go/src/github.com/DataDog/dd-trace-py/.tox/log/tox-0.log)', 1)
docker-compose kill
Killing ddtracepy_ddagent_1 ... done
Killing ddtracepy_postgres_1 ... done
Killing ddtracepy_memcached_1 ... done
Killing ddtracepy_mysql_1 ... done
Killing ddtracepy_elasticsearch_1 ... done
Killing ddtracepy_mongo_1 ... done
Killing ddtracepy_cassandra_1 ... done
Killing ddtracepy_redis_1 ... done
rake aborted!
Command failed with status (2): [tox...]
/home/vagrant/go/src/github.com/DataDog/dd-trace-py/Rakefile:5:in `block in <top (required)>'
/var/lib/gems/1.9.1/gems/rake-12.0.0/exe/rake:27:in `<top (required)>'
Tasks: TOP => test
(See full trace by running task with --trace)
Problem related to: https://stackoverflow.com/questions/7719380/python-setup-py-sdist-error-operation-not-permitted
Solution: add del os.link after import os in setup.py.
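That is, at the top of setup.py:
import os

# sdist hard-links files, which fails on shared folders (vboxsf/NFS);
# deleting os.link makes distutils fall back to copying
if hasattr(os, 'link'):
    del os.link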
I just wanted to put this here as a placeholder since I am currently working on an integration for celery.
If anyone is using celery and is interested in tracing any specific functions/features, please feel free to leave a comment. I am still trying to put together a game plan of what is worth tracing (tasks yes, but the worker/consumer/producers/queues/etc. are TBD).
I am currently trying to take a "patch" approach with the integration, e.g.:
from ddtrace.contrib.celery import patch_all; patch_all()
django.cache applies the django. prefix here, and django.template applies the django. prefix here, yet django.db does not apply the django. prefix. That creates a service per database defined, and duplicates a large amount of data that the postgres patcher already collects.
Slide into my DMs on Datadog's official Slack to get some example links.
This is a feature request.
Could you please add tracing for requests made via boto, especially to S3, which can get very time-consuming?
The patches in "ddtrace/contrib/psycopg/connection.py" provide a new default cursor that performs APM tracing, but in my code I've been using psycopg2.extras.DictCursor, so no tracing is performed: the patch only replaces the default cursor class, it does not wrap a caller-provided one.
Any ideas on how this should be solved?
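A repro sketch of the gap (the connection string is illustrative):
import ddtrace; ddtrace.patch_all()
import psycopg2
import psycopg2.extras

conn = psycopg2.connect('postgresql://localhost')
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
print(type(cur))  # psycopg2.extras.DictCursor, not a TracedCursor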
If you run the app with uwsgi process forking and use the tracer before the fork, we'll leak traces.
To repro, here's a small flask app:
# use tracing right away
from flask import Flask  # imports were missing from the original snippet
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

# start a dummy trace here to ensure we start tracing
# before we fork.
with tracer.trace("aaaa"):
    pass

app = Flask(__name__)
traced_app = TraceMiddleware(app, tracer, service="foo")

@app.route('/')
def foo():
    return 'hello'
and run with:
uwsgi -p 4 --http :9090 --manage-script-name --mount /=path:app --enable-threads
or
git clone https://github.com/DataDog/trace-examples
cd python/flask
rake uwsgi
Workaround: run with --lazy-apps
Hello,
I just ran into an issue where I wasn't able to make ddtrace work on Python 2.7.6 (one of our servers), while it worked on a local machine with Python 2.7.13 in a Docker container.
Installing Python 2.7.13 (compiled from source, just for the sake of testing this one project) made it work on the server.
Upgrading Python globally on the server to 2.7.13 did not work; ddtrace still complains with the error pasted below.
Going to keep you posted with our findings, and if you can help, please do not hesitate :)
Thanks!
When wrapping the application with TraceMiddleware as mentioned in the docs, I get this error:
venv/lib/python2.7/site-packages/flask/app.py:1836: in __call__
return self.wsgi_app(environ, start_response)
E TypeError: 'TraceMiddleware' object is not callable
The middleware is not callable (no sign of a __call__ method), which means it violates PEP 333.
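For context, a minimal sketch of what PEP 333 expects of a middleware object; note that ddtrace's Flask TraceMiddleware instead instruments the app in place, so you keep serving the app itself rather than the middleware object (as in the uwsgi example elsewhere on this page):
class WSGIMiddleware(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # a WSGI middleware must itself be callable
        return self.app(environ, start_response)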
keys that are being reported:
SETEX FOOBARBLAH
SETEX* NF
SETEX TOMATO
...
It appears that the wrap API defaults do not work with the UI.
I need to do this for it to work:
@tracer.wrap(name=DATADOG_SETTINGS["TRACER_SERVICE_NAME"],service=DATADOG_SETTINGS["TRACER_SERVICE_NAME"], resource="FUNCTION_NAME")
In our application we have a process pool of botocore workers. Is there a way, in general, to ensure that sub-processes can chain up to their parent process's traces?
I'm guessing the right context details need to be transferred to the sub-process to make things work.
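A rough sketch of that idea, not a supported workflow: ship the parent span's ids to the worker and activate a Context built from them (the Context import path and kwargs are assumptions):
from ddtrace import tracer
from ddtrace.context import Context  # import path is an assumption

def worker_job(parent_trace_id, parent_span_id):
    # rebuild and activate the parent's context in the sub-process
    ctx = Context(trace_id=parent_trace_id, span_id=parent_span_id)
    tracer.context_provider.activate(ctx)
    with tracer.trace('worker.job'):
        pass  # spans here chain up to the parent process's trace

parent = tracer.current_span()
# pass these two integers to the sub-process, e.g. in the task payload
payload = (parent.trace_id, parent.span_id)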
has anyone else run into the issue of ddtrace-py turning on all default loggers for everything when running `patch_all()`? Weirdly, I can only replicate it within Docker, but it's definitely the `patch_all()` command that's causing it
[8:50 PM]
same thing happens if i run a single `patch()` on any library, it seems
[8:52 PM]
thinking it might be caused by this line: https://github.com/DataDog/dd-trace-py/blob/a50b5f5422716fae1c54b589cd448dc295b32757/ddtrace/monkey.py#L77
[8:53 PM]
any reason that's `logging.info(...)` on the `logging` module instead of getting a logger and calling `.info()` on that?
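For reference, the conventional fix (the message text is illustrative): a module-level logger never triggers the implicit basicConfig() that logging.info() on the root logger does:
import logging

log = logging.getLogger(__name__)

def patch_module(module):
    # ... do the patching ...
    log.info('patched %s', module)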
I'm noticing this strange error in my logs from celery workers (ddtrace-run python manage.py celery worker ...):
WARNING:sitecustomize:error configuring Datadog tracing
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/ddtrace/bootstrap/sitecustomize.py", line 37, in <module>
from ddtrace import tracer
ImportError: No module named 'ddtrace'
Which is strange as the normal django tracing etc. is working fine.
I'm running the workers with -l INFO -Ofair --concurrency=1.
django==1.8.18
celery==3.1.25
django-celery==3.2.1
We're trying to use trace for our Pyramid/Python3 apps. Our logs are filled with entries like this:
2017-07-17 13:40:47 ERROR (receiver.go:232) - dropping trace reason: invalid span Span[t_id:7175232974767753317,s_id:17561579160692702674,p_id:0,ser:,name:pyramid.render,res:pyramid.render]: span.normalize: empty `Service` (debug for more info), [Span[t_id:7175232974767753317,s_id:17561579160692702674,p_id:0,ser:,name:pyramid.render,res:pyramid.render]]
We're not sure whether there's something peculiar about our app or stack that's causing the issue. We did have to make a few small changes to avoid crashing on startup with trace - they are in the PR and I also filed #311 - but this seems to be another problem entirely.
Otherwise the same span can be traced multiple times.
More details here: https://github.com/tomchristie/apistar
Using Anaconda in a Windows environment, here is the .sublime-build file:
{
    "name": "Anaconda DD Python Builder",
    "shell_cmd": "ddtrace-run python -u \"$file\"",
    "file_regex": "^[ ]*File \"(...*?)\", line ([0-9]*)",
    "selector": "source.python"
}
Upon execution (build) of any script, I receive the following:
__init__() got an unexpected keyword argument 'name'
If I remove ddtrace-run from the shell_cmd line, everything executes as expected.
I was wondering what the intended use is for each of these methods. I understand trace() uses context, so multiple traces that occur in a nesting fashion create a lineage of spans; this makes sense. start_span() seems similar but puts more responsibility on the user to define the lineage via another span or some context.
But my question lies in the mixing of the two, or in this particular case of two start_span() calls in a row (I'll get to that in a second).
The first case is when a user creates a root span by calling start_span(), then start_span(child_of=tracer.get_call_context()). Here's an example:
span = tracer.start_span("name", "service", "resource")
different_span = tracer.start_span("different name", "service", "resource",
                                   child_of=tracer.get_call_context())
My assumption is that different_span should be the child of span, but I don't believe this is the case. When span was created with the child_of arg passed in as the default None, start_span does the following: (1) it creates a new Context, (2) adds the new span to it, and (3) sets the span's parent_span_id and parent_trace_id from that context.
At this point the tracer object's context looks the same as it did before start_span was called, since the context that just gained a new span was garbage collected at the end of the method. I also notice that step 3 seems to take no effect, seeing as that line is only executed if parent is None, which means the context variable was newly created, making parent_span_id and parent_trace_id default to None. This could be due to an intention of creating a Context Provider that better supports distributed tracing; maybe these couple of lines are themselves a WIP.
I'm just curious what the intention is in this case of using start_span, seeing as a span created with child_of=None is not only a root span but has little opportunity to be a parent span as well.
The same goes for using start_span(..., child_of=None) and then trace(): I see a similar effect happening, and wonder if the intention is to use trace() only with other trace() calls, and start_span() mostly by passing in the parent span directly.
Apologies for the long post, but this caused me great confusion when developing with the library and made me question my understanding of it.
Hi, I use gunicorn with gevent workers. After installing ddtrace I'm not receiving DB traces; if I switch to sync workers I receive all DB traces.
I tried patching gevent in gunicorn's post_fork hook, but that did not work either.
Is this a possible bug, or am I missing something?
My system is:
ddtrace==0.6.0
Django==1.10.6
gevent==1.2.1
gunicorn==19.6.0
On this line: https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/contrib/httplib/patch.py#L39 it does a bare return without sending "resp", which ends up nulling out the response. It should instead return resp.
It appears that the Bottle middleware makes some assumptions about the data that you would want to report on without the ability to extend those options. Currently, it looks like this:
finally:
    s.set_tag(http.STATUS_CODE, code or response.status_code)
    s.set_tag(http.URL, request.path)
    s.set_tag(http.METHOD, request.method)
It would be nice for this behavior to be extensible so that users could add arbitrary tags based on the requests coming through. For example, something like this may work:
finally:
    tags = self.get_tags(code, request)
    for tag_name, tag_value in tags.iteritems():
        s.set_tag(tag_name, tag_value)
Here get_tags would return a dictionary of tags/values, and by default provide the values that are currently there.
This would allow users to subclass TracePlugin to provide more meaningful values for their organizations.
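A usage sketch under the proposed hook (get_tags is the suggested extension point, not an existing API; the extra tag is an example):
from ddtrace.contrib.bottle import TracePlugin

class MyTracePlugin(TracePlugin):
    def get_tags(self, code, request):
        tags = super(MyTracePlugin, self).get_tags(code, request)
        tags['http.query'] = request.query_string  # organization-specific tag
        return tags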
This would be useful not only for the obvious reason of being able to trace aiohttp client queries; it also has side benefits like tracing each request made from aiobotocore.
I'm adding this as an issue because I just found that botocore/aiobotocore doesn't trace the underlying retry requests, and right now only conveys back the information from the last request. This is useful in our case because we want to log the request IDs of AWS requests that timed out so we can open a support case.
We are getting this while using just @tracer.wrap().
The agent is running, as well as the trace-agent. Anything else we should be setting up here?
error sending spans
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/ddtrace/writer.py", line 120, in _target
self.api.send_traces(traces)
File "/usr/local/lib/python3.4/dist-packages/ddtrace/api.py", line 47, in send_traces
File "/usr/local/lib/python3.4/dist-packages/ddtrace/api.py", line 78, in _put
response = self._put(self._traces, data)
File "/usr/lib/python3.4/http/client.py", line 1125, in request
conn.request("PUT", endpoint, data, self._headers)
self._send_request(method, url, body, headers)
File "/usr/lib/python3.4/http/client.py", line 1163, in _send_request
self.endheaders(body)
File "/usr/lib/python3.4/http/client.py", line 951, in _send_output
File "/usr/lib/python3.4/http/client.py", line 1121, in endheaders
self._send_output(message_body)
self.send(msg)
self.connect()
File "/usr/lib/python3.4/http/client.py", line 863, in connect
File "/usr/lib/python3.4/http/client.py", line 886, in send
File "/usr/lib/python3.4/socket.py", line 512, in create_connection
self.timeout, self.source_address)
raise err
File "/usr/lib/python3.4/socket.py", line 503, in create_connection
sock.connect(sa)
ConnectionRefusedError: [Errno 111] Connection refused