Python source code examples: elasticsearch.TransportError()
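Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a client call in try/except, catch elasticsearch.TransportError, and inspect its status_code, error and info attributes (attribute names as in the pre-8.0 elasticsearch-py client, which these examples use). The cluster URL, index name and document id below are placeholders, not taken from any of the examples.

from elasticsearch import Elasticsearch, TransportError

es = Elasticsearch("http://localhost:9200")  # placeholder cluster URL

try:
    # Any transport-level failure (non-2xx response, connection problem)
    # is raised by the elasticsearch-py client as TransportError.
    doc = es.get(index="my-index", id="1")   # hypothetical index and id
    print(doc["_source"])
except TransportError as e:
    print("status:", e.status_code)  # HTTP status code ('N/A' for connection errors)
    print("error:", e.error)         # short error description
    print("info:", e.info)           # full error body or underlying exception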

Example 1
def main():
    try:
        args = parse_args()
        es = Elasticsearch(args['es'])
        stepWise(es=es,
                 text=args['text'],
                 indexName=args['index'],
                 analyzer=getAnalyzer(indexName=args['index'],
                                      analyzerName=args['analyzer'],
                                      es=es))

    except KeyboardInterrupt:
        print('Interrupted')
    except AnalyzerNotFound as e:
        print(e.error)
    except TransportError as e:
        print("Unexpected Elasticsearch Transport Exception:")
        print(e.error)
        print(e.info) 
Example 2
def get(self, project_id, doc_id, user_id=None):
        try:
            res = self.es.get(index=self.index,
                              doc_type=self.doc_type,
                              id=doc_id)
            doc = res['_source']
        except elasticsearch.TransportError:
            raise freezer_api_exc.DocumentNotFound(
                message='No document found with ID:{0}'.format(doc_id))
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message='Get operation failed: {}'.format(e))
        if doc['project_id'] != project_id:
            raise freezer_api_exc.AccessForbidden("You are not allowed to"
                                                  " access")
        if user_id:
            if doc['user_id'] != user_id:
                raise freezer_api_exc.AccessForbidden(
                    "Document access forbidden"
                )
        if '_version' in res:
            doc['_version'] = res['_version']
        return doc 
Example 3
def insert(self, doc, doc_id=None):
        try:
            # remove _version from the document
            doc.pop('_version', None)
            res = self.es.index(index=self.index, doc_type=self.doc_type,
                                body=doc, id=doc_id)
            created = res['created']
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.StorageEngineError(
                message='index operation failed {0}'.format(e))
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message='index operation failed {0}'.format(e))
        return created, version 
Example 4
def update(self, job_id, job_update_doc):
        # remove _version from the document
        job_update_doc.pop('_version', 0)
        update_doc = {"doc": job_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=job_id, body=update_doc)
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message='Unable to find job to update with id'
                        ' {0} {1}'.format(job_id, e))
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message='Unable to update job with id {0}'.format(job_id))
        return version 
Example 5
def update(self, action_id, action_update_doc):
        # remove _version from the document
        action_update_doc.pop('_version', 0)
        update_doc = {"doc": action_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=action_id, body=update_doc)
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message='Unable to find action to update with id'
                        ' {0}'.format(action_id))
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message='Unable to update action with id'
                        ' {0}'.format(action_id))
        return version 
Example 6
def get(self, user_id, doc_id):
        try:
            res = self.es.get(index=self.index,
                              doc_type=self.doc_type,
                              id=doc_id)
            doc = res['_source']
        except elasticsearch.TransportError:
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('No document found with ID %s') % doc_id)
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('Get operation failed: %s') % e)
        if doc['user_id'] != user_id:
            raise freezer_api_exc.AccessForbidden(
                _i18n._("Document access forbidden"))
        if '_version' in res:
            doc['_version'] = res['_version']
        return doc 
Example 7
def insert(self, doc, doc_id=None):
        try:
            # remove _version from the document
            doc.pop('_version', None)
            res = self.es.index(index=self.index, doc_type=self.doc_type,
                                body=doc, id=doc_id)
            created = res['created']
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('index operation failed %s') % e)
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('index operation failed %s') % e)
        return (created, version) 
Example 8
def update(self, job_id, job_update_doc):
        # remove _version from the document
        job_update_doc.pop('_version', 0)
        update_doc = {"doc": job_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=job_id, body=update_doc)
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('Unable to find job to update '
                                'with id %(id)s. %(e)s') % {'id': job_id,
                                                            'e': e})
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('Unable to update job with id %s') % job_id)
        return version 
Example 9
def update(self, action_id, action_update_doc):
        # remove _version from the document
        action_update_doc.pop('_version', 0)
        update_doc = {"doc": action_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=action_id, body=update_doc)
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('Unable to find action to update '
                                'with id %s') % action_id)
        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._(
                    'Unable to update action with id %s') % action_id)
        return version 
Example 10
def update(self, session_id, session_update_doc):
        # remove _version from the document
        session_update_doc.pop('_version', 0)
        update_doc = {"doc": session_update_doc}
        try:
            res = self.es.update(index=self.index, doc_type=self.doc_type,
                                 id=session_id, body=update_doc)
            version = res['_version']
            self.es.indices.refresh(index=self.index)
        except elasticsearch.TransportError as e:
            if e.status_code == 409:
                raise freezer_api_exc.DocumentExists(message=e.error)
            raise freezer_api_exc.DocumentNotFound(
                message=_i18n._('Unable to update session '
                                '%(id)s %(e)s') % {'id': session_id, 'e': e}
            )

        except Exception:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._(
                    'Unable to update session with id %s') % session_id)
        return version 
Example 11
async def __call__(self, es, params):
        import elasticsearch
        max_num_segments = params.get("max-num-segments")
        # preliminary support for overriding the global request timeout (see #567). As force-merge falls back to
        # the raw transport API (where the keyword argument is called `timeout`) in some cases we will always need
        # a special handling for the force-merge API.
        request_timeout = params.get("request-timeout")
        try:
            if max_num_segments:
                await es.indices.forcemerge(index=params.get("index"), max_num_segments=max_num_segments, request_timeout=request_timeout)
            else:
                await es.indices.forcemerge(index=params.get("index"), request_timeout=request_timeout)
        except elasticsearch.TransportError as e:
            # this is caused by older versions of Elasticsearch (< 2.1), fall back to optimize
            if e.status_code == 400:
                if max_num_segments:
                    await es.transport.perform_request("POST", f"/_optimize?max_num_segments={max_num_segments}",
                                                       timeout=request_timeout)
                else:
                    await es.transport.perform_request("POST", "/_optimize", timeout=request_timeout)
            else:
                raise e 
Example 12
async def __call__(self, es, params):
        import elasticsearch
        datafeed_id = mandatory(params, "datafeed-id", self)
        body = mandatory(params, "body", self)
        try:
            await es.xpack.ml.put_datafeed(datafeed_id=datafeed_id, body=body)
        except elasticsearch.TransportError as e:
            # fallback to old path
            if e.status_code == 400:
                await es.transport.perform_request(
                    "PUT",
                    f"/_xpack/ml/datafeeds/{datafeed_id}",
                    body=body,
                )
            else:
                raise e 
Example 13
async def __call__(self, es, params):
        import elasticsearch
        datafeed_id = mandatory(params, "datafeed-id", self)
        force = params.get("force", False)
        try:
            # we don't want to fail if a datafeed does not exist, thus we ignore 404s.
            await es.xpack.ml.delete_datafeed(datafeed_id=datafeed_id, force=force, ignore=[404])
        except elasticsearch.TransportError as e:
            # fallback to old path (ES < 7)
            if e.status_code == 400:
                await es.transport.perform_request(
                    "DELETE",
                    f"/_xpack/ml/datafeeds/{datafeed_id}",
                    params={
                        "force": escape(force),
                        "ignore": 404
                    },
                )
            else:
                raise e 
Example 14
async def __call__(self, es, params):
        import elasticsearch
        datafeed_id = mandatory(params, "datafeed-id", self)
        body = params.get("body")
        start = params.get("start")
        end = params.get("end")
        timeout = params.get("timeout")
        try:
            await es.xpack.ml.start_datafeed(datafeed_id=datafeed_id, body=body, start=start, end=end, timeout=timeout)
        except elasticsearch.TransportError as e:
            # fallback to old path (ES < 7)
            if e.status_code == 400:
                await es.transport.perform_request(
                    "POST",
                    f"/_xpack/ml/datafeeds/{datafeed_id}/_start",
                    body=body,
                )
            else:
                raise e 
Example 15
async def __call__(self, es, params):
        import elasticsearch
        job_id = mandatory(params, "job-id", self)
        body = mandatory(params, "body", self)
        try:
            await es.xpack.ml.put_job(job_id=job_id, body=body)
        except elasticsearch.TransportError as e:
            # fallback to old path (ES < 7)
            if e.status_code == 400:
                await es.transport.perform_request(
                    "PUT",
                    f"/_xpack/ml/anomaly_detectors/{job_id}",
                    body=body,
                )
            else:
                raise e 
Example 16
async def __call__(self, es, params):
        import elasticsearch
        job_id = mandatory(params, "job-id", self)
        force = params.get("force", False)
        # we don't want to fail if a job does not exist, thus we ignore 404s.
        try:
            await es.xpack.ml.delete_job(job_id=job_id, force=force, ignore=[404])
        except elasticsearch.TransportError as e:
            # fallback to old path (ES < 7)
            if e.status_code == 400:
                await es.transport.perform_request(
                    "DELETE",
                    f"/_xpack/ml/anomaly_detectors/{job_id}",
                    params={
                        "force": escape(force),
                        "ignore": 404
                    },
                )
            else:
                raise e 
Example 17
async def __call__(self, es, params):
        import elasticsearch
        job_id = mandatory(params, "job-id", self)
        force = params.get("force", False)
        timeout = params.get("timeout")
        try:
            await es.xpack.ml.close_job(job_id=job_id, force=force, timeout=timeout)
        except elasticsearch.TransportError as e:
            # fallback to old path (ES < 7)
            if e.status_code == 400:
                request_params = {
                    "force": escape(force),
                }
                if timeout:
                    request_params["timeout"] = escape(timeout)

                await es.transport.perform_request(
                    "POST",
                    f"/_xpack/ml/anomaly_detectors/{job_id}/_close",
                    params=request_params,
                )
            else:
                raise e 
Example 18
def record(self):
        """
        Collect recovery stats for indexes (optionally) specified in telemetry parameters and push to metrics store.
        """
        import elasticsearch

        try:
            stats = self.client.indices.recovery(index=self.indices, active_only=True, detailed=False)
        except elasticsearch.TransportError:
            msg = "A transport error occurred while collecting recovery stats on cluster [{}]".format(self.cluster_name)
            self.logger.exception(msg)
            raise exceptions.RallyError(msg)

        for idx, idx_stats in stats.items():
            for shard in idx_stats["shards"]:
                doc = {
                    "name": "recovery-stats",
                    "shard": shard
                }
                shard_metadata = {
                    "cluster": self.cluster_name,
                    "index": idx,
                    "shard": shard["id"]
                }
                self.metrics_store.put_doc(doc, level=MetaInfoScope.cluster, meta_data=shard_metadata) 
Example 19
async def test_create_ml_datafeed_fallback(self, es):
        es.xpack.ml.put_datafeed.side_effect = as_future(exception=elasticsearch.TransportError(400, "Bad Request"))
        es.transport.perform_request.return_value = as_future()
        datafeed_id = "some-data-feed"
        body = {
                "job_id": "total-requests",
                "indices": ["server-metrics"]
            }
        params = {
            "datafeed-id": datafeed_id,
            "body": body
        }

        r = runner.CreateMlDatafeed()
        await r(es, params)

        es.transport.perform_request.assert_called_once_with("PUT", f"/_xpack/ml/datafeeds/{datafeed_id}", body=body) 
Example 20
async def test_delete_ml_datafeed_fallback(self, es):
        es.xpack.ml.delete_datafeed.side_effect = as_future(exception=elasticsearch.TransportError(400, "Bad Request"))
        es.transport.perform_request.return_value = as_future()
        datafeed_id = "some-data-feed"
        params = {
            "datafeed-id": datafeed_id,
        }

        r = runner.DeleteMlDatafeed()
        await r(es, params)

        es.transport.perform_request.assert_called_once_with("DELETE",
                                                             f"/_xpack/ml/datafeeds/{datafeed_id}",
                                                             params={
                                                                 "force": "false",
                                                                 "ignore": 404
                                                             }) 
Example 21
async def test_stop_ml_datafeed_fallback(self, es):
        es.xpack.ml.stop_datafeed.side_effect = as_future(exception=elasticsearch.TransportError(400, "Bad Request"))
        es.transport.perform_request.return_value = as_future()

        params = {
            "datafeed-id": "some-data-feed",
            "force": random.choice([False, True]),
            "timeout": "5s"
        }

        r = runner.StopMlDatafeed()
        await r(es, params)

        es.transport.perform_request.assert_called_once_with("POST",
                                                             f"/_xpack/ml/datafeeds/{params['datafeed-id']}/_stop",
                                                             params={
                                                                 "force": str(params["force"]).lower(),
                                                                 "timeout": params["timeout"]
                                                             }) 
Example 22
async def test_delete_ml_job_fallback(self, es):
        es.xpack.ml.delete_job.side_effect = as_future(exception=elasticsearch.TransportError(400, "Bad Request"))
        es.transport.perform_request.return_value = as_future()

        job_id = "an-ml-job"
        params = {
            "job-id": job_id
        }

        r = runner.DeleteMlJob()
        await r(es, params)

        es.transport.perform_request.assert_called_once_with("DELETE",
                                                             f"/_xpack/ml/anomaly_detectors/{params['job-id']}",
                                                             params={
                                                                 "force": "false",
                                                                 "ignore": 404
                                                             }) 
Example 23
async def test_close_ml_job_fallback(self, es):
        es.xpack.ml.close_job.side_effect = as_future(exception=elasticsearch.TransportError(400, "Bad Request"))
        es.transport.perform_request.return_value = as_future()

        params = {
            "job-id": "an-ml-job",
            "force": random.choice([False, True]),
            "timeout": "5s"
        }

        r = runner.CloseMlJob()
        await r(es, params)

        es.transport.perform_request.assert_called_once_with("POST",
                                                             f"/_xpack/ml/anomaly_detectors/{params['job-id']}/_close",
                                                             params={
                                                                 "force": str(params["force"]).lower(),
                                                                 "timeout": params["timeout"]
                                                             }) 
Example 24
def test_multi_missing(data_client):
    s1 = Repository.search()
    s2 = Search(index='flat-git')
    s3 = Search(index='does_not_exist')

    ms = MultiSearch()
    ms = ms.add(s1).add(s2).add(s3)

    with raises(TransportError):
        ms.execute()

    r1, r2, r3 = ms.execute(raise_on_error=False)

    assert 1 == len(r1)
    assert isinstance(r1[0], Repository)
    assert r1._search is s1

    assert 52 == r2.hits.total.value
    assert r2._search is s2

    assert r3 is None 
Example 25
def elastic_update_request(item: dict) -> dict:
        try:
            elastic = elasticsearch.Elasticsearch(es_url)
            response = elastic.update(
                index=item["index"],
                doc_type=item["doc_type"] if "doc_type" in item else item["index"],
                id=item["id"],
                body={"doc": item["changes"]}
            )
            return response

        except elasticsearch.TransportError as e:
            # Will return the appropriate error message along with the status code.
            logging.getLogger(ERROR_LOGGER).exception(e)
            raise ElasticTransportError(e.error)

        except Exception as e:
            logging.getLogger(ERROR_LOGGER).exception(e)
            raise APIException("There has been an unidentified error in the backend, please contact the developers about this issue.") 
Example 26
def remove(self, obj_or_string, commit=True):
        """
        Removes an object from the index.
        :param obj_or_string:
        :param commit:
        """
        if not self.setup_complete:
            try:
                self.setup()
            except elasticsearch.TransportError as e:
                if not self.silently_fail:
                    raise
                doc_id = get_identifier(obj_or_string)
                self.log.error("Failed to remove document '%s' from Elasticsearch: %s", doc_id, e)
                return

        for language in self.languages:
            # self.log.debug('removing {0} from index {1}'.format(obj_or_string, language))
            self.index_name = self._index_name_for_language(language)
            with translation.override(language):
                super(ElasticsearchMultilingualSearchBackend, self).remove(obj_or_string,
                                                                           commit=commit) 
Example 27
def search_fields(self):
        field_query = self.request.json_body.get('field')

        es_query = {
            "size": 15,
            "query": {
                "match" : { "name" : field_query }
            }
        }
        try:
            es_results = config.es.search(
                index='data_explorer_fields',
                doc_type='flywheel_field',
                body=es_query
            )
        except TransportError as e:
            config.log.warning('Fields not yet indexed for search: {}'.format(e))
            return []

        results = []
        for result in es_results['hits']['hits']:
            results.append(result['_source'])

        return results 
Example 28
def test_fallback_gracefully(self):
        # Note: can't use override settings because of how restframework handle settings :(
        #from django_elasticsearch.tests.urls import TestViewSet
        from rest_framework.filters import DjangoFilterBackend, OrderingFilter
        from rest_framework.settings import api_settings

        api_settings.DEFAULT_FILTER_BACKENDS = (DjangoFilterBackend, OrderingFilter)
        # TODO: better way to fake es cluster's death ?

        with mock.patch.object(es_client, 'search') as mock_search:
            mock_search.side_effect = TransportError()
            with mock.patch.object(es_client, 'count') as mock_count:
                mock_count.side_effect = TransportError()
                with mock.patch.object(es_client, 'get') as mock_get:
                    mock_get.side_effect = TransportError()
                    # should fallback to a regular django queryset / filtering
                    r = self.client.get('/rf/tests/')
                    self.assertEqual(r.status_code, 200)
                    self.assertEqual(r.data['filter_status'], 'Failed')
                    self.assertEqual(r.data['count'], 3)
                    self._test_filter_backend_filters()
                    self._test_pagination() 
Example 29
def test_that_itersearch_clears_scroll_on_successful_scroll(self):
        for docs, meta in self.ss.itersearch(index=self._index,
                                             doc_type=self._doc_type,
                                             body=dict(
                                                 query=dict(match_all={})),
                                             scroll='10m', size=100,
                                             with_meta=True):
            scroll_id = meta['_scroll_id']
        # check if it was the right exception
        self.assertRaises(TransportError, self.es.scroll, scroll_id)
        try:
            self.es.scroll(scroll_id)
        except TransportError as err:
            self.assertTrue('SearchContextMissingException' in str(err)) 
Example 30
@classmethod
def es_query_total(cls, cluster: Elasticsearch, index: str, group: str, **kwargs) -> 'MetricMonitor':
        def fetch_stat() -> Optional[float]:
            try:
                response = cluster.indices.stats(index=index, groups=[group], metric='search')
            except elasticsearch.NotFoundError:
                # If our index doesn't exist we can't possibly allow things
                # to continue. Report the metric unavailable and wait for
                # the index to exist.
                log.exception('Index not found while fetching index stats for %s', index)
                return None
            except elasticsearch.TransportError:
                # Connection error to elasticsearch, could be network, restarts, etc.
                log.exception('Transport error while fetching index stats for %s', index)
                return None

            try:
                query_total = response['_all']['total']['search']['groups'][group]['query_total']
                log.debug('Group %s in index %s reported query_total of %d', group, index, query_total)
                return query_total
            except KeyError:
                # Typically this means the group hasn't collected any stats.
                # This could happen after a full cluster restart but before any
                # prod traffic is run through. I'm a bit wary of always
                # returning 0, but it is correct.
                log.info('No stats in index %s for group %s', index, group)
                return 0.0
        return cls(fetch_stat, StreamingEMA(), **kwargs)