diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f9edcf2c8..f1996d60a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -46,6 +46,7 @@ jobs: name: Build Catalog runs-on: [ ubuntu-latest ] needs: [ build_analyzers, build_responders ] + if: always() steps: - name: Checkout uses: actions/checkout@v3 @@ -77,7 +78,7 @@ jobs: name: Build documentation runs-on: [ ubuntu-latest ] needs: [build_analyzers, build_responders ] - if: startsWith(github.ref, 'refs/tags/') + if: startsWith(github.ref, 'refs/tags/') && always() steps: - uses: actions/checkout@v3 - name: Prepare documentation files diff --git a/analyzers/Autofocus/Dockerfile b/analyzers/Autofocus/Dockerfile new file mode 100644 index 000000000..76363bf39 --- /dev/null +++ b/analyzers/Autofocus/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.11 +WORKDIR /worker +COPY . Autofocus +RUN test ! -e Autofocus/requirements.txt || pip install --no-cache-dir -r Autofocus/requirements.txt +ENTRYPOINT Autofocus/analyzer.py diff --git a/analyzers/Elasticsearch/elk.py b/analyzers/Elasticsearch/elk.py index b4f010bf4..5a5c84475 100755 --- a/analyzers/Elasticsearch/elk.py +++ b/analyzers/Elasticsearch/elk.py @@ -1,342 +1,342 @@ -#!/usr/bin/env python3 - -from elasticsearch import Elasticsearch -from cortexutils.analyzer import Analyzer -import dateutil.parser -from datetime import datetime -import pytz - -# utils -import operator - -class Hit: - def __init__(self,hitindex,hitid,process_parent_name,process_name,process_args,user_name,host_name,timestamp,time,\ - destination_ip,destination_port,source_ip,source_port,source_user_name,url_domain,url_path,url_full,\ - rule_category,dns_question_name,dns_resolvedip): - self.hitindex = hitindex - self.hitid = hitid - self.process_parent_name = process_parent_name - self.process_name = process_name - self.process_args = process_args - self.host_name = host_name - self.user_name = user_name - self.timestamp = timestamp - self.time = time - self.url_domain = url_domain - self.url_path = url_path - self.url_full = url_full - self.source_ip = source_ip - self.source_port = source_port - self.source_user_name = source_user_name - self.destination_ip = destination_ip - self.destination_port = destination_port - self.rule_category = rule_category - self.dns_question_name = dns_question_name - self.dns_resolvedip = dns_resolvedip - -class ElasticsearchAnalyzer(Analyzer): - # Analyzer's constructor - def __init__(self): - # Call the constructor of the super class - Analyzer.__init__(self) - - - self.endpoints = self.get_param('config.endpoints', None, 'Elasticsearch endpoint is missing') - self.kibana = self.get_param('config.kibana', None, None) - self.index = self.get_param('config.index', None, 'Elasticsearch index is missing') - self.keys = self.get_param('config.keys', None, None) - self.users = self.get_param('config.users', None, None) - self.passwords = self.get_param('config.passwords', None, None) - self.dashboard = self.get_param('config.dashboard', None, None) - self.verify = self.get_param('config.verifyssl', True, None) - self.cert = self.get_param('config.cert_path', None, None) - self.fields = self.get_param('config.field', None, 'Field is missing') - self.data = self.get_param('data', None, 'Data is missing') - self.size = self.get_param('config.size', None, 'size is missing') - - - def summary(self, raw): - taxonomies = [] - namespace = "ELK" - predicate = "Hit(s)" - - value = "{}".format(raw['info']['hitcount']) - if raw['info']['hitcount'] > 0: - level = 
"suspicious" - else: - level = "safe" - - taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) - - return {"taxonomies": taxonomies} - - def artifacts(self, raw): - artifacts = [] - domains = [] - urls = [] - ips = [] - - for hit in raw['hits']: - #domains - if 'url_domain' in hit: - if isinstance(hit['url_domain'],list): - for domain in hit['url_domain']: - domains.append(domain) - else: - domains.append(hit['url_domain']) - if 'dns_question_name' in hit: - if isinstance(hit['dns_question_name'],list): - for domain in hit['dns_question_name']: - domains.append(domain) - else: - domains.append(hit['dns_question_name']) - #urls - if 'url_full' in hit: - if isinstance(hit['url_full'],list): - for url in hit['url_full']: - urls.append(url) - - #ips - if 'source_ip' in hit: - if isinstance(hit['source_ip'],list): - for ip in hit['source_ip']: - ips.append(ip) - else: - ips.append(hit['source_ip']) - if 'destination_ip' in hit: - if isinstance(hit['destination_ip'],list): - for ip in hit['destination_ip']: - ips.append(ip) - else: - ips.append(hit['destination_ip']) - if 'dns_resolvedip' in hit: - if isinstance(hit['dns_resolvedip'],list): - for ip in hit['dns_resolvedip']: - ips.append(ip) - else: - ips.append(hit['dns_resolvedip']) - - - domains = list(set(domains)) - for domain in domains: - if domain != "": - observable = {'dataType' : 'domain', 'data' : domain, 'message' : 'domain from elastic'} - artifacts.append(observable) - urls = list(set(urls)) - for url in urls: - if url != "": - observable = {'dataType' : 'url', 'data' : url, 'message' : 'url from elastic'} - artifacts.append(observable) - ips = list(set(ips)) - for ip in ips: - if ip != "": - observable = {'dataType' : 'ip', 'data' : ip, 'message' : 'ip from elastic'} - artifacts.append(observable) - - return artifacts - - def run(self): - Analyzer.run(self) - try: - for endpoint,key,user,password in zip(self.endpoints,self.keys,self.users,self.passwords): - if key: - es = Elasticsearch( - endpoint, - api_key = (key), - ca_certs=self.cert, - verify_certs=self.verify, - timeout=30 - ) - elif user: - es = Elasticsearch( - endpoint, - http_auth = (user,password), - ca_certs=self.cert, - verify_certs=self.verify, - timeout=30 - ) - else: - es = Elasticsearch( - endpoint, - ca_certs=self.cert, - verify_certs=self.verify, - timeout=30 - ) - - info = {} - hits = [] - devices = [] - total = 'eq' - #url that links to kibana dashboard - info['query'] = "" - #query string to show kql search - info['querystring'] = "" - self.fields = [x.lower() for x in self.fields] - #remove all hash fields if not a hash - if self.data_type != 'hash': - self.fields = list(filter( lambda s: not ("hash" in s), self.fields)) - #remove all ip fields if not an ip - if self.data_type != 'ip': - self.fields = list(filter( lambda s: not ("ip" in s), self.fields)) - #remove all url and domain fields if not a url or domain or fqdn - if self.data_type != 'domain' and self.data_type != 'url' and self.data_type != 'fqdn': - self.fields = list(filter( lambda s: not ("url" in s or "domain" in s), self.fields)) - if self.kibana and self.dashboard: - #building query - info['query'] += self.kibana+"/app/kibana#/dashboard/"+self.dashboard+\ - "?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-1M,to:now))&_a=(columns:!(_source),interval:auto,query:(language:kuery,query:'" - #building query and query string - info['query'] += self.fields[0] + "%20:%20%22" + self.data - info['querystring'] = self.fields[0] + ':"' + self.data - for field in 
self.fields[1:]: - info['query'] += "%22%20or%20" + field + "%20:%20%22" + self.data - info['querystring'] += '" or ' + field + ':"' + self.data - info['query'] += "%22'),sort:!(!(start_time,desc)))" - info['querystring'] += '"' - #loop to get hits from each index - for index in self.index: - #search elastic for fields in each index - res = es.search(size=self.size,index=index,body={'sort':[{"@timestamp":{"order":"desc"}}],'query':{'multi_match':{'query':self.data, 'fields':self.fields}}}) - #if relation is gte then more logs exist than we will display - if res['hits']['total']['relation'] == 'gte' or res['hits']['total']['relation'] == 'gt': - total = 'gte' - #adding results from each query - for hit in res['hits']['hits']: - hitindex = hit['_index'] - hitid = hit['_id'] - #process fields - process_parent_name = "" - process_name = "" - process_args = "" - #user fields - user_name = "" - #host fields - host_name = "" - #base fields - timestamp = "" - #destination fields - destination_ip = "" - destination_port = "" - #source fields - source_ip = "" - source_port = "" - source_user_name = "" - #event fields - event_action = "" - #url fields - url_domain = "" - url_path = "" - url_full = "" - #dns fields - dns_question_name = "" - dns_resolvedip = "" - #rule fields - rule_category = "" - - #base fields - if '@timestamp' in hit['_source']: - if isinstance(hit['_source']['@timestamp'],str): - timestamp = dateutil.parser.parse(hit['_source']['@timestamp']) - time = timestamp.astimezone().strftime("%m/%d/%Y %I:%M %p") - timestamp = str(timestamp) - else: - timestamp = dateutil.parser.parse(datetime.fromtimestamp(float(hit['_source']['@timestamp']/1000)).strftime('%c')) - time = timestamp.astimezone().strftime("%m/%d/%Y %I:%M %p") - timestamp = str(timestamp) - #host fields - if 'host' in hit['_source']: - if 'name' in hit['_source']['host']: - host_name = hit['_source']['host']['name'] - #process fields - if 'process' in hit['_source']: - if 'parent' in hit['_source']['process']: - if 'name' in hit['_source']['process']['parent']: - process_parent_name = hit['_source']['process']['parent']['name'] - if 'name' in hit['_source']['process']: - process_name = hit['_source']['process']['name'] - if 'args' in hit['_source']['process']: - process_args = hit['_source']['process']['args'] - #destination fields - if 'destination' in hit['_source']: - if 'ip' in hit['_source']['destination']: - destination_ip = hit['_source']['destination']['ip'] - if 'port' in hit['_source']['destination']: - destination_port = hit['_source']['destination']['port'] - #source fields - if 'source' in hit['_source']: - if 'ip' in hit['_source']['source']: - source_ip = hit['_source']['source']['ip'] - if 'port' in hit['_source']['source']: - source_port = hit['_source']['source']['port'] - if 'user' in hit['_source']['source']: - if 'name' in hit['_source']['source']['user']: - source_user_name = hit['_source']['source']['user']['name'] - #event fields - if 'event' in hit['_source']: - if 'action' in hit['_source']['event']: - event_action = hit['_source']['event']['action'] - #url fields - if 'url' in hit['_source']: - if 'domain' in hit['_source']['url']: - url_domain = hit['_source']['url']['domain'] - if 'path' in hit['_source']['url']: - url_path = hit['_source']['url']['path'] - if 'full' in hit['_source']['url']: - url_full = hit['_source']['url']['full'] - #user fields - if 'user' in hit['_source']: - if 'name' in hit['_source']['user']: - user_name = hit['_source']['user']['name'] - #rule fields - if 'rule' in 
hit['_source']: - if 'category' in hit['_source']['rule']: - rule_category = hit['_source']['rule']['category'] - #dns fields - if 'dns' in hit['_source']: - if 'question' in hit['_source']['dns']: - if 'name' in hit['_source']['dns']['question']: - dns_question_name = hit['_source']['dns']['question']['name'] - if 'resolved_ip' in hit['_source']['dns']: - dns_resolvedip = hit['_source']['dns']['resolved_ip'] - - - hits.append(Hit(hitindex,hitid,process_parent_name,process_name,process_args,user_name,host_name,\ - timestamp,time,destination_ip,destination_port,source_ip,source_port,source_user_name,\ - url_domain,url_path,url_full,rule_category,dns_question_name,dns_resolvedip)) - - #setup users - usernames = [item.user_name for item in hits] - source_usernames = [item.source_user_name for item in hits] - usernames.extend(source_usernames) - info['uniqueusers'] = list(set(usernames)) - if "" in info['uniqueusers']: - info['uniqueusers'].remove("") - info['userhitcount'] = len(info['uniqueusers']) - - #setup devices - devices = [item.host_name for item in hits] - info['uniquedevices'] = list(set(devices)) - if "" in info['uniquedevices']: - info['uniquedevices'].remove("") - info['devicehitcount'] = len(info['uniquedevices']) - - #observable that was searched on - info['data'] = self.data - info['dashboard'] = self.dashboard - info['hitcount'] = len(hits) - - #sort the hits based on timestamp - hits.sort(key=operator.attrgetter('timestamp'), reverse=True) - hits = [ob.__dict__ for ob in hits] - - self.report({'hits' : hits, - 'info' : info, - 'total': total}) - - except Exception as e: - self.unexpectedError(e) - return - - -if __name__ == '__main__': - ElasticsearchAnalyzer().run() \ No newline at end of file +#!/usr/bin/env python3 + +from elasticsearch import Elasticsearch +from cortexutils.analyzer import Analyzer +import dateutil.parser +from datetime import datetime +import pytz + +# utils +import operator + +class Hit: + def __init__(self,hitindex,hitid,process_parent_name,process_name,process_args,user_name,host_name,timestamp,time,\ + destination_ip,destination_port,source_ip,source_port,source_user_name,url_domain,url_path,url_full,\ + rule_category,dns_question_name,dns_resolvedip): + self.hitindex = hitindex + self.hitid = hitid + self.process_parent_name = process_parent_name + self.process_name = process_name + self.process_args = process_args + self.host_name = host_name + self.user_name = user_name + self.timestamp = timestamp + self.time = time + self.url_domain = url_domain + self.url_path = url_path + self.url_full = url_full + self.source_ip = source_ip + self.source_port = source_port + self.source_user_name = source_user_name + self.destination_ip = destination_ip + self.destination_port = destination_port + self.rule_category = rule_category + self.dns_question_name = dns_question_name + self.dns_resolvedip = dns_resolvedip + +class ElasticsearchAnalyzer(Analyzer): + # Analyzer's constructor + def __init__(self): + # Call the constructor of the super class + Analyzer.__init__(self) + + + self.endpoints = self.get_param('config.endpoints', None, 'Elasticsearch endpoint is missing') + self.kibana = self.get_param('config.kibana', None, None) + self.index = self.get_param('config.index', None, 'Elasticsearch index is missing') + self.keys = self.get_param('config.keys', None, None) + self.users = self.get_param('config.users', None, None) + self.passwords = self.get_param('config.passwords', None, None) + self.dashboard = self.get_param('config.dashboard', None, None) + 
self.verify = self.get_param('config.verifyssl', True, None) + self.cert = self.get_param('config.cert_path', None, None) + self.fields = self.get_param('config.field', None, 'Field is missing') + self.data = self.get_param('data', None, 'Data is missing') + self.size = self.get_param('config.size', None, 'size is missing') + + + def summary(self, raw): + taxonomies = [] + namespace = "ELK" + predicate = "Hit(s)" + + value = "{}".format(raw['info']['hitcount']) + if raw['info']['hitcount'] > 0: + level = "suspicious" + else: + level = "safe" + + taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) + + return {"taxonomies": taxonomies} + + def artifacts(self, raw): + artifacts = [] + domains = [] + urls = [] + ips = [] + + for hit in raw['hits']: + #domains + if 'url_domain' in hit: + if isinstance(hit['url_domain'],list): + for domain in hit['url_domain']: + domains.append(domain) + else: + domains.append(hit['url_domain']) + if 'dns_question_name' in hit: + if isinstance(hit['dns_question_name'],list): + for domain in hit['dns_question_name']: + domains.append(domain) + else: + domains.append(hit['dns_question_name']) + #urls + if 'url_full' in hit: + if isinstance(hit['url_full'],list): + for url in hit['url_full']: + urls.append(url) + + #ips + if 'source_ip' in hit: + if isinstance(hit['source_ip'],list): + for ip in hit['source_ip']: + ips.append(ip) + else: + ips.append(hit['source_ip']) + if 'destination_ip' in hit: + if isinstance(hit['destination_ip'],list): + for ip in hit['destination_ip']: + ips.append(ip) + else: + ips.append(hit['destination_ip']) + if 'dns_resolvedip' in hit: + if isinstance(hit['dns_resolvedip'],list): + for ip in hit['dns_resolvedip']: + ips.append(ip) + else: + ips.append(hit['dns_resolvedip']) + + + domains = list(set(domains)) + for domain in domains: + if domain != "": + observable = {'dataType' : 'domain', 'data' : domain, 'message' : 'domain from elastic'} + artifacts.append(observable) + urls = list(set(urls)) + for url in urls: + if url != "": + observable = {'dataType' : 'url', 'data' : url, 'message' : 'url from elastic'} + artifacts.append(observable) + ips = list(set(ips)) + for ip in ips: + if ip != "": + observable = {'dataType' : 'ip', 'data' : ip, 'message' : 'ip from elastic'} + artifacts.append(observable) + + return artifacts + + def run(self): + Analyzer.run(self) + try: + for endpoint,key,user,password in zip(self.endpoints,self.keys,self.users,self.passwords): + if key: + es = Elasticsearch( + endpoint, + api_key = (key), + ca_certs=self.cert, + verify_certs=self.verify, + timeout=30 + ) + elif user: + es = Elasticsearch( + endpoint, + http_auth = (user,password), + ca_certs=self.cert, + verify_certs=self.verify, + timeout=30 + ) + else: + es = Elasticsearch( + endpoint, + ca_certs=self.cert, + verify_certs=self.verify, + timeout=30 + ) + + info = {} + hits = [] + devices = [] + total = 'eq' + #url that links to kibana dashboard + info['query'] = "" + #query string to show kql search + info['querystring'] = "" + self.fields = [x.lower() for x in self.fields] + #remove all hash fields if not a hash + if self.data_type != 'hash': + self.fields = list(filter( lambda s: not ("hash" in s), self.fields)) + #remove all ip fields if not an ip + if self.data_type != 'ip': + self.fields = list(filter( lambda s: not ("ip" in s), self.fields)) + #remove all url and domain fields if not a url or domain or fqdn + if self.data_type != 'domain' and self.data_type != 'url' and self.data_type != 'fqdn': + self.fields = 
list(filter( lambda s: not ("url" in s or "domain" in s), self.fields)) + if self.kibana and self.dashboard: + #building query + info['query'] += self.kibana+"/app/kibana#/dashboard/"+self.dashboard+\ + "?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-1M,to:now))&_a=(columns:!(_source),interval:auto,query:(language:kuery,query:'" + #building query and query string + info['query'] += self.fields[0] + "%20:%20%22" + self.data + info['querystring'] = self.fields[0] + ':"' + self.data + for field in self.fields[1:]: + info['query'] += "%22%20or%20" + field + "%20:%20%22" + self.data + info['querystring'] += '" or ' + field + ':"' + self.data + info['query'] += "%22'),sort:!(!(start_time,desc)))" + info['querystring'] += '"' + #loop to get hits from each index + for index in self.index: + #search elastic for fields in each index + res = es.search(size=self.size,index=index,body={'sort':[{"@timestamp":{"order":"desc"}}],'query':{'multi_match':{'query':self.data, 'fields':self.fields}}}) + #if relation is gte then more logs exist than we will display + if res['hits']['total']['relation'] == 'gte' or res['hits']['total']['relation'] == 'gt': + total = 'gte' + #adding results from each query + for hit in res['hits']['hits']: + hitindex = hit['_index'] + hitid = hit['_id'] + #process fields + process_parent_name = "" + process_name = "" + process_args = "" + #user fields + user_name = "" + #host fields + host_name = "" + #base fields + timestamp = "" + #destination fields + destination_ip = "" + destination_port = "" + #source fields + source_ip = "" + source_port = "" + source_user_name = "" + #event fields + event_action = "" + #url fields + url_domain = "" + url_path = "" + url_full = "" + #dns fields + dns_question_name = "" + dns_resolvedip = "" + #rule fields + rule_category = "" + + #base fields + if '@timestamp' in hit['_source']: + if isinstance(hit['_source']['@timestamp'],str): + timestamp = dateutil.parser.parse(hit['_source']['@timestamp']) + time = timestamp.astimezone().strftime("%m/%d/%Y %I:%M %p") + timestamp = str(timestamp) + else: + timestamp = dateutil.parser.parse(datetime.fromtimestamp(float(hit['_source']['@timestamp']/1000)).strftime('%c')) + time = timestamp.astimezone().strftime("%m/%d/%Y %I:%M %p") + timestamp = str(timestamp) + #host fields + if 'host' in hit['_source']: + if 'name' in hit['_source']['host']: + host_name = hit['_source']['host']['name'] + #process fields + if 'process' in hit['_source']: + if 'parent' in hit['_source']['process']: + if 'name' in hit['_source']['process']['parent']: + process_parent_name = hit['_source']['process']['parent']['name'] + if 'name' in hit['_source']['process']: + process_name = hit['_source']['process']['name'] + if 'args' in hit['_source']['process']: + process_args = hit['_source']['process']['args'] + #destination fields + if 'destination' in hit['_source']: + if 'ip' in hit['_source']['destination']: + destination_ip = hit['_source']['destination']['ip'] + if 'port' in hit['_source']['destination']: + destination_port = hit['_source']['destination']['port'] + #source fields + if 'source' in hit['_source']: + if 'ip' in hit['_source']['source']: + source_ip = hit['_source']['source']['ip'] + if 'port' in hit['_source']['source']: + source_port = hit['_source']['source']['port'] + if 'user' in hit['_source']['source']: + if 'name' in hit['_source']['source']['user']: + source_user_name = hit['_source']['source']['user']['name'] + #event fields + if 'event' in hit['_source']: + if 'action' in 
hit['_source']['event']: + event_action = hit['_source']['event']['action'] + #url fields + if 'url' in hit['_source']: + if 'domain' in hit['_source']['url']: + url_domain = hit['_source']['url']['domain'] + if 'path' in hit['_source']['url']: + url_path = hit['_source']['url']['path'] + if 'full' in hit['_source']['url']: + url_full = hit['_source']['url']['full'] + #user fields + if 'user' in hit['_source']: + if 'name' in hit['_source']['user']: + user_name = hit['_source']['user']['name'] + #rule fields + if 'rule' in hit['_source']: + if 'category' in hit['_source']['rule']: + rule_category = hit['_source']['rule']['category'] + #dns fields + if 'dns' in hit['_source']: + if 'question' in hit['_source']['dns']: + if 'name' in hit['_source']['dns']['question']: + dns_question_name = hit['_source']['dns']['question']['name'] + if 'resolved_ip' in hit['_source']['dns']: + dns_resolvedip = hit['_source']['dns']['resolved_ip'] + + + hits.append(Hit(hitindex,hitid,process_parent_name,process_name,process_args,user_name,host_name,\ + timestamp,time,destination_ip,destination_port,source_ip,source_port,source_user_name,\ + url_domain,url_path,url_full,rule_category,dns_question_name,dns_resolvedip)) + + #setup users + usernames = [item.user_name for item in hits] + source_usernames = [item.source_user_name for item in hits] + usernames.extend(source_usernames) + info['uniqueusers'] = list(set(usernames)) + if "" in info['uniqueusers']: + info['uniqueusers'].remove("") + info['userhitcount'] = len(info['uniqueusers']) + + #setup devices + devices = [item.host_name for item in hits] + info['uniquedevices'] = list(set(devices)) + if "" in info['uniquedevices']: + info['uniquedevices'].remove("") + info['devicehitcount'] = len(info['uniquedevices']) + + #observable that was searched on + info['data'] = self.data + info['dashboard'] = self.dashboard + info['hitcount'] = len(hits) + + #sort the hits based on timestamp + hits.sort(key=operator.attrgetter('timestamp'), reverse=True) + hits = [ob.__dict__ for ob in hits] + + self.report({'hits' : hits, + 'info' : info, + 'total': total}) + + except Exception as e: + self.unexpectedError(e) + return + + +if __name__ == '__main__': + ElasticsearchAnalyzer().run() diff --git a/analyzers/Maltiverse/Dockerfile b/analyzers/Maltiverse/Dockerfile index 197bc9db9..a84c8c0c2 100644 --- a/analyzers/Maltiverse/Dockerfile +++ b/analyzers/Maltiverse/Dockerfile @@ -1,5 +1,6 @@ FROM python:3-slim WORKDIR /worker COPY . Maltiverse +RUN apt update && apt install -y git RUN test ! 
-e Maltiverse/requirements.txt || pip install --no-cache-dir -r Maltiverse/requirements.txt
 ENTRYPOINT Maltiverse/maltiverse-client.py
diff --git a/analyzers/MalwareClustering/requirements.txt b/analyzers/MalwareClustering/requirements.txt
index cb46adae6..4c62cdeaf 100644
--- a/analyzers/MalwareClustering/requirements.txt
+++ b/analyzers/MalwareClustering/requirements.txt
@@ -1,6 +1,7 @@
 cortexutils
 requests
 pyimpfuzzy==0.5
-py2neo==2021.0.1
+# py2neo is EOL and older versions were removed from PyPI: https://github.com/neo4j-contrib/py2neo
+py2neo==2021.2.4
 apiscout==1.1.5
 python-magic==0.4.22
\ No newline at end of file
diff --git a/analyzers/RecordedFuture/README.md b/analyzers/RecordedFuture/README.md
new file mode 100644
index 000000000..d95c1b1f0
--- /dev/null
+++ b/analyzers/RecordedFuture/README.md
@@ -0,0 +1,27 @@
+This analyzer will return Recorded Future Intelligence for the following datatypes:
+* ip
+* domain
+* fqdn
+* hash
+* url
+
+Enriched observables can display:
+* Risk Summary: Risk Score, Criticality, and link to the Intelligence Card
+* Recorded Future AI Insights
+
+![](assets/RecordedFutureAnalyzerReport.jpg)
+
+* Risk Rules and Evidence Details
+
+![](assets/RiskRulesReport.jpg)
+
+* Technical & Insikt Group Research Links
+
+![](assets/LinksReport.jpg)
+
+* Related Threat Actors
+* Related Attack Vectors
+* Malware Family / Category
+* Related IPs
+* Related Domains
+* Related Hashes
\ No newline at end of file
diff --git a/analyzers/RecordedFuture/RecordedFuture.json b/analyzers/RecordedFuture/RecordedFuture.json
new file mode 100644
index 000000000..2bf27be95
--- /dev/null
+++ b/analyzers/RecordedFuture/RecordedFuture.json
@@ -0,0 +1,27 @@
+{
+    "name": "RecordedFuture",
+    "version": "2.0",
+    "author": "Recorded Future",
+    "url": "https://github.com/TheHive-Project/Cortex-Analyzers",
+    "license": "AGPL-V3",
+    "dataTypeList": ["ip", "domain", "fqdn", "hash", "url"],
+    "description": "Enrich IP, Domain, FQDN, URL, or Hash with Recorded Future context: Risk Score, Risk Details, AI Insights, Links, Threat Actor, Attack Vector, Malware Category / Family, and Related Entities (IPs, Domains, and Hashes)",
+    "command": "RecordedFuture/recordedfuture.py",
+    "baseConfig": "RecordedFuture",
+    "configurationItems": [
+        {
+            "name": "key",
+            "description": "API Token",
+            "type": "string",
+            "multi": false,
+            "required": true
+        }
+    ],
+    "registration_required": true,
+    "subscription_required": true,
+    "service_homepage": "https://www.recordedfuture.com/",
+    "service_logo": {
+        "path": "assets/recordedfuture-logo.png",
+        "caption": "logo"
+    }
+}
\ No newline at end of file
diff --git a/analyzers/RecordedFuture/RecordedFuture_risk.json b/analyzers/RecordedFuture/RecordedFuture_risk.json
deleted file mode 100644
index ea64739fb..000000000
--- a/analyzers/RecordedFuture/RecordedFuture_risk.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-    "name": "RecordedFuture_risk",
-    "version": "1.0",
-    "author": "KAPSCH-CDC",
-    "url": "https://github.com/kapschcdc/Cortex-Analyzers",
-    "license": "AGPL-V3",
-    "description": "Get the latest risk data from RecordedFuture for a hash, domain or an IP address.",
-    "dataTypeList": ["domain", "ip", "hash"],
-    "command": "RecordedFuture/recordedfuture.py",
-    "baseConfig": "RecordedFuture",
-    "configurationItems": [
-        {
-            "name": "key",
-            "description": "API key for RecordedFuture",
-            "type": "string",
-            "multi": false,
-            "required": true
-        }
-    ]
-}
diff --git a/analyzers/RecordedFuture/assets/LinksReport.jpg b/analyzers/RecordedFuture/assets/LinksReport.jpg new
file mode 100644 index 000000000..ba7d222ec Binary files /dev/null and b/analyzers/RecordedFuture/assets/LinksReport.jpg differ diff --git a/analyzers/RecordedFuture/assets/RecordedFutureAnalyzerReport.jpg b/analyzers/RecordedFuture/assets/RecordedFutureAnalyzerReport.jpg new file mode 100644 index 000000000..1a84f1a96 Binary files /dev/null and b/analyzers/RecordedFuture/assets/RecordedFutureAnalyzerReport.jpg differ diff --git a/analyzers/RecordedFuture/assets/RiskRulesReport.jpg b/analyzers/RecordedFuture/assets/RiskRulesReport.jpg new file mode 100644 index 000000000..c844b7d4a Binary files /dev/null and b/analyzers/RecordedFuture/assets/RiskRulesReport.jpg differ diff --git a/analyzers/RecordedFuture/assets/recordedfuture-logo.png b/analyzers/RecordedFuture/assets/recordedfuture-logo.png new file mode 100644 index 000000000..2da3ffadd Binary files /dev/null and b/analyzers/RecordedFuture/assets/recordedfuture-logo.png differ diff --git a/analyzers/RecordedFuture/recordedfuture.py b/analyzers/RecordedFuture/recordedfuture.py index 418a0402e..65d6c2726 100755 --- a/analyzers/RecordedFuture/recordedfuture.py +++ b/analyzers/RecordedFuture/recordedfuture.py @@ -1,56 +1,463 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +""" +##################################### TERMS OF USE ########################################### +# The following code is provided for demonstration purpose only, and should not be used # +# without independent verification. Recorded Future makes no representations or warranties, # +# express, implied, statutory, or otherwise, regarding any aspect of this code or of the # +# information it may retrieve, and provides it both strictly “as-is” and without assuming # +# responsibility for any information it may retrieve. Recorded Future shall not be liable # +# for, and you assume all risk of using, the foregoing. By using this code, Customer # +# represents that it is solely responsible for having all necessary licenses, permissions, # +# rights, and/or consents to connect to third party APIs, and that it is solely responsible # +# for having all necessary licenses, permissions, rights, and/or consents to any data # +# accessed from any third party API. 
#
+##############################################################################################
+"""
+
+import json
+import urllib.error
+import urllib.request
+from urllib.parse import urlencode, quote_plus
 
 from cortexutils.analyzer import Analyzer
-import urllib.request
-import json
 
+APP_VERSION = "2.0"
+APP_ID = "ps-thehive-analyzer/{}".format(APP_VERSION)
+IP_DATA_TYPE = "ip"
+DOMAIN_DATA_TYPE = "domain"
+FQDN_DATA_TYPE = "fqdn"
+HASH_DATA_TYPE = "hash"
+RF_API = "https://api.recordedfuture.com/v2/"
+URL_DATA_TYPE = "url"
+DEFAULT_LINKS_MAP = {
+    'Links_Threat_Actors': ["No Threat Actor Links Found"],
+    'Links_Tools_Malware': ["No Malware Links Found"],
+    'Links_TTPs_Mitre': ["No MITRE ATT&CK TTP Links Found"],
+    'Links_TTPs_Attack_Vector': ["No Attack Vector Links Found"],
+    'Links_Indicators_IP': ["No IP Address Links Found"],
+    'Links_Indicators_Domain': ["No Domain Links Found"],
+    'Links_Indicators_URL': ["No URL Links Found"],
+    'Links_Indicators_Hash': ["No Hash Links Found"],
+    'Links_Detection_Malware_Sig': ["No Malware Signature Links Found"],
+    'Links_Victims_Org': ["No Victim Organization Links Found"],
+    'Links_Victims_IP': ["No Victim IP Address Links Found"],
+    'Links_Exploit_Vuln': ["No Vulnerability Links Found"],
+}
+
+
+class RecordedFuture(Analyzer):
+    """Recorded Future Analyzer class used for observable enrichment and to format the report."""
 
-class RecordedFutureAnalyzer(Analyzer):
     def __init__(self):
         Analyzer.__init__(self)
-        self.recordedfuture_key = self.get_param('config.key', None, 'Missing RecordedFuture API key')
-        self.proxies = self.get_param('config.proxy', None)
+        self.api_key = self.get_param("config.key", None, "Recorded Future token is missing")
+
+    def lookup_observable(self, observable, data_type):
+        """Query the Recorded Future API for entity enrichment.
+
+        Return Risk, Links, Related Entities, Intelligence Card link, and Counts.
+
+        Args:
+            observable (string): Case observable to enrich with Recorded Future
+            data_type (string): the observable's data type
+
+        Returns:
+            dict: the Recorded Future JSON response from observable lookup
+        """
+        # if the analyzer is run on a URL observable, encode the url string before sending to the API
+        if data_type == URL_DATA_TYPE:
+            observable = quote_plus(observable)
+        # if the observable is an fqdn in TheHive, use the Recorded Future domain endpoint
+        elif data_type == FQDN_DATA_TYPE:
+            data_type = DOMAIN_DATA_TYPE
+
+        # URL to query Recorded Future API
+        params = {'fields': 'aiInsights,counts,entity,intelCard,links,relatedEntities,risk'}
+        url = RF_API + ("%s/%s?%s") % (data_type, observable, urlencode(params))
+
+        token = self.api_key
+        headers = {'X-RFToken': token, 'User-Agent': APP_ID}
+        req = urllib.request.Request(url, None, headers)
+
+        json_response = {}
+        try:
+            with urllib.request.urlopen(req) as res:
+                json_response = json.loads(res.read().decode("utf-8"))
+        except urllib.error.HTTPError as e:
+            self.error("HTTP Error reason: " + e.reason)
+        except IOError as e:
+            self.error(str(e))
+
+        return json_response
+
+    def add_related_entities(self, related_entities, entities_list):
+        """
+        Add related entities to the analyzer report if they contain 5 or more co-occurrences.
+ + Args: + related_entities (list): related entities returned from the API query + entities_list (list): append entities to this list if they pass the check + """ + for related in related_entities: + if int(related['count']) > 4: + entities_list.append(related['entity']['name']) + + def format_related_entities(self, json_response, dict_report): + """ + Get related entities from the JSON response and format into the report. + + Args: + json_response (dict): API response containing entity context + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with related entities + """ + # Initializing List variables used to store the wanted information + malwareCategory = [] + relatedHash = [] + relatedIpAddress = [] + relatedThreatActor = [] + relatedInternetDomainName = [] + relatedMalware = [] + relatedAttackVector = [] + + try: + for relatedEntity in json_response['data']['relatedEntities']: + if relatedEntity['type'] == "RelatedMalwareCategory": + self.add_related_entities(relatedEntity['entities'], malwareCategory) + if relatedEntity['type'] == "RelatedHash": + self.add_related_entities(relatedEntity['entities'], relatedHash) + if relatedEntity['type'] == "RelatedIpAddress": + self.add_related_entities(relatedEntity['entities'], relatedIpAddress) + if relatedEntity['type'] == "RelatedThreatActor": + self.add_related_entities(relatedEntity['entities'], relatedThreatActor) + if relatedEntity['type'] == "RelatedInternetDomainName": + self.add_related_entities(relatedEntity['entities'], relatedInternetDomainName) + if relatedEntity['type'] == "RelatedMalware": + self.add_related_entities(relatedEntity['entities'], relatedMalware) + if relatedEntity['type'] == "RelatedAttackVector": + self.add_related_entities(relatedEntity['entities'], relatedAttackVector) + except KeyError: + pass + + if not relatedThreatActor: + relatedThreatActor.append("No Related Threat Actor Found") + if not malwareCategory: + malwareCategory.append("No Malware Category Found") + if not relatedHash: + relatedHash.append("No Related Hashes Found") + if not relatedIpAddress: + relatedIpAddress.append("No Related IP Addresses Found") + if not relatedInternetDomainName: + relatedInternetDomainName.append("No Related Domain Names Found") + if not relatedMalware: + relatedMalware.append("No Malware Family Found") + if not relatedAttackVector: + relatedAttackVector.append("No Related Attack Vector Found") + + dict_report['Malware_Category'] = malwareCategory + dict_report['Malware_Family'] = relatedMalware + dict_report['Threat_Actor'] = relatedThreatActor + dict_report['Related_Hashes'] = relatedHash + dict_report['Related_IPs'] = relatedIpAddress + dict_report['Related_Domains'] = relatedInternetDomainName + dict_report['Attack_Vector'] = relatedAttackVector + + return dict_report + + def add_link_to_list(self, entities, links): + """ + Add Linked entity to the provided links list if it is not a duplicate. + + Args: + entities (list): the list of linked entities to be added + links (list): append entities to this list if they are unique + """ + for entity in entities: + if entity['name'] not in links: + links.append(entity['name']) + + def add_actors_tools_ttps(self, section_lists, dict_report): + """ + Parse Actors, Tools & TTPs from links and format into the report. 
+ + Args: + section_lists (list): entity lists under links Actors, Tools & TTPs sections + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with Actors, Tools & TTPs + """ + linksThreatActors = dict_report.get('Links_Threat_Actors', []) + linksToolsMalware = dict_report.get('Links_Tools_Malware', []) + linksTTPsMitre = dict_report.get('Links_TTPs_Mitre', []) + linksTTPsAttackVector = dict_report.get('Links_TTPs_Attack_Vector', []) + + for section_list in section_lists: + type_name = section_list.get('type', {}).get('name') + + if type_name == "Threat Actor": + self.add_link_to_list(section_list['entities'], linksThreatActors) + elif type_name == "Malware": + self.add_link_to_list(section_list['entities'], linksToolsMalware) + elif type_name == "MitreAttackIdentifier": + self.add_link_to_list(section_list['entities'], linksTTPsMitre) + elif type_name == "AttackVector": + self.add_link_to_list(section_list['entities'], linksTTPsAttackVector) + + dict_report['Links_Threat_Actors'] = linksThreatActors + dict_report['Links_Tools_Malware'] = linksToolsMalware + dict_report['Links_TTPs_Mitre'] = linksTTPsMitre + dict_report['Links_TTPs_Attack_Vector'] = linksTTPsAttackVector + + return dict_report + + def add_indicators_detection_rules(self, section_lists, dict_report): + """ + Parse Indicators & Detection Rules from links and format into the report. + + Args: + section_lists (list): entity lists under links Indicators & Detection Rules sections + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with Indicators & Detection Rules + """ + linksIndicatorsIP = dict_report.get('Links_Indicators_IP', []) + linksIndicatorsDomain = dict_report.get('Links_Indicators_Domain', []) + linksIndicatorsURL = dict_report.get('Links_Indicators_URL', []) + linksIndicatorsHash = dict_report.get('Links_Indicators_Hash', []) + linksDetectionMalwareSig = dict_report.get('Links_Detection_Malware_Sig', []) + + for section_list in section_lists: + type_name = section_list.get('type', {}).get('name') + + if type_name == "IpAddress": + self.add_link_to_list(section_list['entities'], linksIndicatorsIP) + elif type_name == "InternetDomainName": + self.add_link_to_list(section_list['entities'], linksIndicatorsDomain) + elif type_name == "URL": + self.add_link_to_list(section_list['entities'], linksIndicatorsURL) + elif type_name == "Hash": + self.add_link_to_list(section_list['entities'], linksIndicatorsHash) + elif type_name == "MalwareSignature": + self.add_link_to_list(section_list['entities'], linksDetectionMalwareSig) + + dict_report['Links_Indicators_IP'] = linksIndicatorsIP + dict_report['Links_Indicators_Domain'] = linksIndicatorsDomain + dict_report['Links_Indicators_URL'] = linksIndicatorsURL + dict_report['Links_Indicators_Hash'] = linksIndicatorsHash + dict_report['Links_Detection_Malware_Sig'] = linksDetectionMalwareSig + + return dict_report + + def add_victims_exploit_targets(self, section_lists, dict_report): + """ + Parse Victims & Exploit Targets from links and format into the report. 
+ + Args: + section_lists (list): entity lists under links Victims & Exploit Targets sections + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with Victims & Exploit Targets + """ + linksVictimsOrg = dict_report.get('Links_Victims_Org', []) + linksVictimsIP = dict_report.get('Links_Victims_IP', []) + linksExploitVuln = dict_report.get('Links_Exploit_Vuln', []) + + for section_list in section_lists: + type_name = section_list.get('type', {}).get('name') + + if type_name == "Organization": + self.add_link_to_list(section_list['entities'], linksVictimsOrg) + elif type_name == "IpAddress": + self.add_link_to_list(section_list['entities'], linksVictimsIP) + elif type_name == "CyberVulnerability": + self.add_link_to_list(section_list['entities'], linksExploitVuln) + + dict_report['Links_Victims_Org'] = linksVictimsOrg + dict_report['Links_Victims_IP'] = linksVictimsIP + dict_report['Links_Exploit_Vuln'] = linksExploitVuln + + return dict_report + + def add_default_links_values(self, dict_report): + """ + Set default text to indicate when no links exist. + This prevents formatting issues within the analyzer report. + + Args: + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with links + """ + for key in DEFAULT_LINKS_MAP: + if not dict_report.get(key): + dict_report[key] = DEFAULT_LINKS_MAP[key] + + return dict_report + + def format_links(self, json_response, dict_report): + """ + Get links from the JSON response and format into the report. + + Args: + json_response (dict): API response containing entity context + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with links + """ + try: + for hit in json_response['data']['links']['hits']: + for section in hit['sections']: + if section['lists']: + section_name = section.get('section_id', {}).get('name') + + if section_name == "Actors, Tools & TTPs": + dict_report = self.add_actors_tools_ttps(section['lists'], dict_report) + elif section_name == "Indicators & Detection Rules": + dict_report = self.add_indicators_detection_rules( + section['lists'], dict_report + ) + elif section_name == "Victims & Exploit Targets": + dict_report = self.add_victims_exploit_targets( + section['lists'], dict_report + ) + except KeyError: + pass + + self.add_default_links_values(dict_report) + + return dict_report + + def format_risk(self, json_response, dict_report): + """ + Format Risk data from the JSON response and add to the report. 
+ + Args: + json_response (dict): API response containing entity context + dict_report (dict): analyzer report content + + Returns: + dict: the analyzer report content with Risk data + """ + evidenceDetails = {} + risk_obj = json_response['data']['risk'] + + try: + riskScore = risk_obj['score'] + except KeyError: + riskScore = 0 + + try: + evidenceDetails = risk_obj['evidenceDetails'] + evidenceDetails.reverse() + except KeyError: + pass + + if not evidenceDetails: + evidenceDetails = [ + { + "criticality": 0, + "criticalityLabel": "None", + "rule": "No Risk Rules Found", + "evidenceString": "No Evidence Details Found", + } + ] + + risk_summary = risk_obj['riskSummary'] + criticality = risk_obj['criticality'] + criticality_label = risk_obj['criticalityLabel'] + + dict_report['Risk_Score'] = riskScore + dict_report['Risk_Summary'] = risk_summary + dict_report['Risk_Details'] = evidenceDetails + dict_report['Criticality'] = criticality + dict_report['Criticality_Label'] = criticality_label + + return dict_report + + def build_report(self, json_response, observable): + """Parse the JSON response from entity enrichment to build the long.html report. + + Args: + json_response (dict): API response containing entity context + observable (string): observable enriched with Recorded Future + """ + ai_insights_obj = json_response['data']['aiInsights'] + if ai_insights_obj['text']: + ai_insights = ai_insights_obj['text'] + elif ai_insights_obj['comment']: + ai_insights = ai_insights_obj['comment'] + else: + ai_insights = "Insufficient Information for Analysis" + + try: + if self.data_type == URL_DATA_TYPE: + intel_card = "https://app.recordedfuture.com/live/sc/entity/url%3A" + observable + else: + intel_card = json_response['data']['intelCard'] + except KeyError: + intel_card = "https://app.recordedfuture.com/live/" + + dict_report = { + 'Intel_Card': intel_card, + 'AI_Insights': ai_insights, + } + + dict_report = self.format_risk(json_response, dict_report) + dict_report = self.format_related_entities(json_response, dict_report) + dict_report = self.format_links(json_response, dict_report) + + # ensure_ascii should be set to False, but there appears to be a bug in Cortexutils. + # Setting True for now. + self.report(dict_report, True) def summary(self, raw): + """Creates the Observable short summary tag to include the Risk Score and color-coded + by Criticality. 
+ + Args: + raw (dict): The long report contents + + Returns: + dict: The short Summary tag taxonomy + """ taxonomies = [] - namespace = 'RF' - - level = 'info' - predicate = 'score' - value = '{}/100'.format(raw['data']['risk']['score']) - criticality = raw['data']['risk']['criticality'] - if criticality == 0: - level = 'safe' - elif criticality == 1: - level = 'info' + namespace = "RecordedFuture" + predicate = "RiskScore" + level = "safe" + value = raw['Risk_Score'] + + criticality = raw['Criticality'] + + if criticality == 1: + level = "info" elif criticality == 2: - level = 'suspicious' + level = "suspicious" elif criticality >= 3: - level = 'malicious' - taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) + level = "malicious" - level = 'info' - predicate = '#evidenceDetails' - value = str(len(raw['data']['risk']['evidenceDetails'])) taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) - - return {"taxonomies": taxonomies} + return {'taxonomies': taxonomies} def run(self): - if self.data_type in ['domain', 'ip', 'hash']: - data = self.get_param('data', None, 'Data is missing') - url = 'https://api.recordedfuture.com/v2/{}/{}?fields=risk%2CintelCard'.format(self.data_type, data) - req = urllib.request.Request(url, None, {'X-RFToken': self.recordedfuture_key}) - try: - with urllib.request.urlopen(req) as res: - j = json.loads(res.read().decode("utf-8")) - self.summary(j) - return self.report(j) - except IOError as e: - self.error(str(e)) + """The entry point when the Recorded Future Analyzer is run on an observable.""" + Analyzer.run(self) + types = [IP_DATA_TYPE, DOMAIN_DATA_TYPE, FQDN_DATA_TYPE, HASH_DATA_TYPE, URL_DATA_TYPE] + + if self.data_type in types: + observable = self.get_param("data", None, "Data is missing") + json_response = self.lookup_observable(observable, self.data_type) + self.build_report(json_response, observable) else: - self.error('Invalid data type') + self.error("Invalid data type") + -if __name__ == '__main__': - RecordedFutureAnalyzer().run() +if __name__ == "__main__": + RecordedFuture().run() diff --git a/analyzers/Splunk/Dockerfile b/analyzers/Splunk/Dockerfile new file mode 100644 index 000000000..95c35ef91 --- /dev/null +++ b/analyzers/Splunk/Dockerfile @@ -0,0 +1,6 @@ +# See https://github.com/splunk/splunk-sdk-python for supported python versions +FROM python:3.7 +WORKDIR /worker +COPY . Splunk +RUN test ! -e Splunk/requirements.txt || pip install --no-cache-dir -r Splunk/requirements.txt +ENTRYPOINT Splunk/splunk.py diff --git a/responders/Jupyter_Responder/Dockerfile b/responders/Jupyter_Responder/Dockerfile index e57117cda..d852f1ad1 100644 --- a/responders/Jupyter_Responder/Dockerfile +++ b/responders/Jupyter_Responder/Dockerfile @@ -1,38 +1,9 @@ -# syntax=docker/dockerfile:1.3-labs FROM python:3.9 WORKDIR /worker COPY . Jupyter_Responder -RUN test ! 
-e Jupyter_Responder/requirements.txt || pip install --no-cache-dir -rJupyter_Responder/requirements.txt
-RUN cat <<EOF > /Jupyter_Responder/papermill_iorw.patch
---- iorw.py	2023-08-11 05:49:49.302149767 +0000
-+++ iorw.py	2023-08-11 05:48:38.553642098 +0000
-@@ -180,7 +180,7 @@
- class HttpHandler(object):
-     @classmethod
-     def read(cls, path):
--        return requests.get(path, headers={'Accept': 'application/json'}).text
-+        return json.dumps(requests.get(path, headers={'Accept': 'application/json'}).json()["content"])
- 
-     @classmethod
-     def listdir(cls, path):
-@@ -188,7 +188,9 @@
- 
-     @classmethod
-     def write(cls, buf, path):
--        result = requests.put(path, json=json.loads(buf))
-+        payload = {"type": "notebook", "format": "json", "path": path}
-+        payload["content"] = json.loads(buf)
-+        result = requests.put(path, json=payload)
-         result.raise_for_status()
- 
-     @classmethod
-EOF
+RUN test ! -e Jupyter_Responder/requirements.txt || pip install --no-cache-dir -r Jupyter_Responder/requirements.txt
-
-RUN pip install papermill
-RUN apt update
-RUN apt install patch
-RUN patch $(python3 -c "from papermill import iorw; print(iorw.__file__)") /Jupyter_Responder/papermill_iorw.patch
+RUN apt update &&\
+    apt install -y patch &&\
+    patch $(python3 -c "from papermill import iorw; print(iorw.__file__)") Jupyter_Responder/papermill_iorw.patch
 ENTRYPOINT Jupyter_Responder/jupyter.py
-
-
diff --git a/responders/Jupyter_Responder/papermill_iorw.patch b/responders/Jupyter_Responder/papermill_iorw.patch
new file mode 100644
index 000000000..411ca7a7f
--- /dev/null
+++ b/responders/Jupyter_Responder/papermill_iorw.patch
@@ -0,0 +1,22 @@
+--- iorw.py	2023-08-11 05:49:49.302149767 +0000
++++ iorw.py	2023-08-11 05:48:38.553642098 +0000
+@@ -180,7 +180,7 @@
+ class HttpHandler(object):
+     @classmethod
+     def read(cls, path):
+-        return requests.get(path, headers={'Accept': 'application/json'}).text
++        return json.dumps(requests.get(path, headers={'Accept': 'application/json'}).json()["content"])
+ 
+     @classmethod
+     def listdir(cls, path):
+@@ -188,7 +188,9 @@
+ 
+     @classmethod
+     def write(cls, buf, path):
+-        result = requests.put(path, json=json.loads(buf))
++        payload = {"type": "notebook", "format": "json", "path": path}
++        payload["content"] = json.loads(buf)
++        result = requests.put(path, json=payload)
+         result.raise_for_status()
+ 
+     @classmethod
\ No newline at end of file
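The patch above swaps papermill's raw-HTTP notebook I/O for Jupyter Contents API semantics: the server nests the notebook JSON under a `content` key on GET and expects a typed envelope on PUT. A minimal sketch of the patched behaviour, assuming a Jupyter server URL such as `http://jupyter:8888/api/contents/output.ipynb?token=...` (the host, port, and token are illustrative assumptions, not part of this PR):

```python
import json

import requests  # the library papermill's HttpHandler already uses


def read_notebook(path):
    # Unpatched papermill returns the raw response body; the Jupyter
    # Contents API nests the notebook JSON under "content", so the patch
    # extracts that key and re-serializes it for papermill.
    response = requests.get(path, headers={"Accept": "application/json"})
    return json.dumps(response.json()["content"])


def write_notebook(buf, path):
    # The Contents API expects the notebook wrapped in a typed envelope on
    # PUT, not the bare notebook JSON that papermill sends by default.
    payload = {"type": "notebook", "format": "json", "path": path,
               "content": json.loads(buf)}
    result = requests.put(path, json=payload)
    result.raise_for_status()
```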
diff --git a/thehive-templates/RecordedFuture_2_0/long.html b/thehive-templates/RecordedFuture_2_0/long.html
new file mode 100644
index 000000000..8ece4354f
--- /dev/null
+++ b/thehive-templates/RecordedFuture_2_0/long.html
@@ -0,0 +1,160 @@
+<div class="panel panel-danger" ng-if="!success">
+  <div class="panel-heading">
+    <strong>{{artifact.data | fang}}</strong>
+  </div>
+  <div class="panel-body">
+    {{content.errorMessage}}
+  </div>
+</div>
+
+<div ng-if="success">
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Summary
+    </div>
+    <div class="panel-body">
+      <dl class="dl-horizontal" ng-if="content.errortext">
+        <dt>ERROR:</dt>
+        <dd>{{content.errortext}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Risk Score</dt>
+        <dd>{{content.Risk_Score}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Criticality</dt>
+        <dd>{{content.Criticality_Label}} ({{content.Criticality}})</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Risk Summary</dt>
+        <dd>{{content.Risk_Summary}}</dd>
+      </dl>
+      <a href="{{content.Intel_Card}}" target="_blank">Intelligence Card</a>
+    </div>
+  </div>
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Recorded Future AI Insights
+    </div>
+    <div class="panel-body">
+      <dl class="dl-horizontal">
+        <dt>AI Insights</dt>
+        <dd>{{content.AI_Insights}}</dd>
+      </dl>
+    </div>
+  </div>
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Triggered Risk Rules
+    </div>
+    <div class="panel-body">
+      <table class="table">
+        <tr>
+          <th>Criticality</th>
+          <th>Risk Rule</th>
+          <th>Evidence Details</th>
+        </tr>
+        <tr ng-repeat="evidence in content.Risk_Details">
+          <td>
+            <span class="label" ng-class="evidence.criticality >= 3 ? 'label-danger' : evidence.criticality == 2 ? 'label-warning' : evidence.criticality == 1 ? 'label-info' : 'label-default'">
+              {{evidence.criticalityLabel}}
+            </span>
+          </td>
+          <td>{{evidence.rule}}</td>
+          <td>{{evidence.evidenceString}}</td>
+        </tr>
+      </table>
+    </div>
+  </div>
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Links: Actors, Tools & TTPs
+    </div>
+    <div class="panel-body">
+      <dl class="dl-horizontal">
+        <dt>Threat Actor</dt>
+        <dd ng-repeat="lta in content.Links_Threat_Actors">{{lta}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Malware</dt>
+        <dd ng-repeat="ltm in content.Links_Tools_Malware">{{ltm}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>MITRE ATT&CK Identifier</dt>
+        <dd ng-repeat="lttpm in content.Links_TTPs_Mitre">{{lttpm}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Attack Vector</dt>
+        <dd ng-repeat="lttpav in content.Links_TTPs_Attack_Vector">{{lttpav}}</dd>
+      </dl>
+    </div>
+  </div>
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Links: Indicators & Detection Rules
+    </div>
+    <div class="panel-body">
+      <dl class="dl-horizontal">
+        <dt>IP Address</dt>
+        <dd ng-repeat="liip in content.Links_Indicators_IP">{{liip}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Domain</dt>
+        <dd ng-repeat="lid in content.Links_Indicators_Domain">{{lid}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>URL</dt>
+        <dd ng-repeat="liu in content.Links_Indicators_URL">{{liu}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Hash</dt>
+        <dd ng-repeat="lih in content.Links_Indicators_Hash">{{lih}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Malware Signature</dt>
+        <dd ng-repeat="ldms in content.Links_Detection_Malware_Sig">{{ldms}}</dd>
+      </dl>
+    </div>
+  </div>
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Links: Victims & Exploit Targets
+    </div>
+    <div class="panel-body">
+      <dl class="dl-horizontal">
+        <dt>Victim Organization</dt>
+        <dd ng-repeat="lvo in content.Links_Victims_Org">{{lvo}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Victim IP Address</dt>
+        <dd ng-repeat="lvi in content.Links_Victims_IP">{{lvi}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Vulnerability</dt>
+        <dd ng-repeat="lev in content.Links_Exploit_Vuln">{{lev}}</dd>
+      </dl>
+    </div>
+  </div>
+
+  <div class="panel panel-info">
+    <div class="panel-heading">
+      Related Context
+    </div>
+    <div class="panel-body">
+      <dl class="dl-horizontal">
+        <dt>Related Threat Actor</dt>
+        <dd ng-repeat="ta in content.Threat_Actor">{{ta}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Related Attack Vector</dt>
+        <dd ng-repeat="av in content.Attack_Vector">{{av}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Malware Category</dt>
+        <dd ng-repeat="mc in content.Malware_Category">{{mc}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Malware Family</dt>
+        <dd ng-repeat="mf in content.Malware_Family">{{mf}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Related IPs</dt>
+        <dd ng-repeat="ip in content.Related_IPs">{{ip}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Related Domains</dt>
+        <dd ng-repeat="domain in content.Related_Domains">{{domain}}</dd>
+      </dl>
+      <dl class="dl-horizontal">
+        <dt>Related Hashes</dt>
+        <dd ng-repeat="hash in content.Related_Hashes">{{hash}}</dd>
+      </dl>
+    </div>
+  </div>
+</div>
diff --git a/thehive-templates/RecordedFuture_2_0/short.html b/thehive-templates/RecordedFuture_2_0/short.html
new file mode 100644
index 000000000..5bd8fbd3a
--- /dev/null
+++ b/thehive-templates/RecordedFuture_2_0/short.html
@@ -0,0 +1,5 @@
+<span class="label" ng-repeat="t in content.taxonomies"
+      ng-class="{'info': 'label-info', 'safe': 'label-success', 'suspicious': 'label-warning', 'malicious': 'label-danger'}[t.level]">
+  {{t.namespace}}:{{t.predicate}}="{{t.value}}"
+</span>
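This short template renders the taxonomies emitted by the new `summary()` in `recordedfuture.py`. A minimal sketch of that flow, assuming the usual four-field dict shape of cortexutils' `build_taxonomy()` helper; the criticality of 3 and score of 96 are illustrative values only:

```python
def build_taxonomy(level, namespace, predicate, value):
    # Same shape as cortexutils.analyzer.Analyzer.build_taxonomy()
    return {"level": level, "namespace": namespace,
            "predicate": predicate, "value": value}


def summary_level(criticality):
    # Mirrors the criticality-to-level mapping in the new summary() method
    if criticality >= 3:
        return "malicious"
    if criticality == 2:
        return "suspicious"
    if criticality == 1:
        return "info"
    return "safe"


# e.g. a criticality of 3 with a risk score of 96 becomes:
taxonomy = build_taxonomy(summary_level(3), "RecordedFuture", "RiskScore", 96)
# short.html then renders the label: RecordedFuture:RiskScore="96"
```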
diff --git a/thehive-templates/RecordedFuture_risk_1_0/long.html b/thehive-templates/RecordedFuture_risk_1_0/long.html
deleted file mode 100644
index 42c5bf2ad..000000000
--- a/thehive-templates/RecordedFuture_risk_1_0/long.html
+++ /dev/null
@@ -1,55 +0,0 @@
-<div class="panel panel-info" ng-if="success">
-  <div class="panel-heading">
-    Summary
-  </div>
-  <div class="panel-body">
-    <dl class="dl-horizontal">
-      <dt>Score</dt>
-      <dd>{{content.data.risk.score}}/100</dd>
-    </dl>
-    <dl class="dl-horizontal">
-      <dt>Criticality</dt>
-      <dd>{{content.data.risk.criticalityLabel}} ({{content.data.risk.criticality}})</dd>
-    </dl>
-    <dl class="dl-horizontal">
-      <dt>Risk summary</dt>
-      <dd>{{content.data.risk.riskSummary}}</dd>
-    </dl>
-    <a href="{{content.data.intelCard}}" target="_blank">Intel Card</a>
-  </div>
-</div>
-
-<div class="panel panel-info" ng-if="success">
-  <div class="panel-heading">
-    Triggered Risk Rules
-  </div>
-  <div class="panel-body">
-    <table class="table">
-      <tr>
-        <th>Criticality</th>
-        <th>Rule</th>
-        <th>Evidence</th>
-      </tr>
-      <tr ng-repeat="evidence in content.data.risk.evidenceDetails">
-        <td>
-          <span class="label" ng-class="evidence.criticality >= 3 ? 'label-danger' : evidence.criticality == 2 ? 'label-warning' : evidence.criticality == 1 ? 'label-info' : 'label-default'">
-            {{evidence.criticalityLabel}}
-          </span>
-        </td>
-        <td>{{evidence.rule}}</td>
-        <td>{{evidence.evidenceString}}</td>
-      </tr>
-    </table>
-  </div>
-</div>
-
-<!-- General error -->
-<div class="panel panel-danger" ng-if="!success">
-  <div class="panel-heading">
-    {{(artifact.data || artifact.attachment.name) | fang}}
-  </div>
-  <div class="panel-body">
-    <dl class="dl-horizontal">
-      <dt>Error:</dt>
-      <dd>{{content.errorMessage}}</dd>
-    </dl>
-  </div>
-</div>
diff --git a/thehive-templates/RecordedFuture_risk_1_0/short.html b/thehive-templates/RecordedFuture_risk_1_0/short.html
deleted file mode 100644
index 5fc0dabfb..000000000
--- a/thehive-templates/RecordedFuture_risk_1_0/short.html
+++ /dev/null
@@ -1,3 +0,0 @@
-<span class="label" ng-repeat="t in content.taxonomies" ng-class="{'info': 'label-info', 'safe': 'label-success', 'suspicious': 'label-warning', 'malicious': 'label-danger'}[t.level]">
-  {{t.namespace}}:{{t.predicate}}="{{t.value}}"
-</span>
diff --git a/utils/docker/Dockerfile_template b/utils/docker/Dockerfile_template
index 99f52258e..9bd7d29c0 100644
--- a/utils/docker/Dockerfile_template
+++ b/utils/docker/Dockerfile_template
@@ -17,5 +17,5 @@ FROM python:3
 WORKDIR /worker
 COPY . {worker_name}
-RUN test ! -e {worker_name}/requirements.txt || pip install --no-cache-dir -r{worker_name}/requirements.txt
+RUN test ! -e {worker_name}/requirements.txt || pip install --no-cache-dir -r {worker_name}/requirements.txt
 ENTRYPOINT {command}
\ No newline at end of file
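The template whitespace fix is cosmetic (pip accepts both `-rfile` and `-r file`), but it keeps generated Dockerfiles consistent with the hand-written ones in this PR. A minimal sketch of rendering the template for a worker, assuming a plain `str.format()` substitution; the actual build tooling in `utils/docker` may differ:

```python
# Verbatim copy of the updated Dockerfile_template body shown above
TEMPLATE = """FROM python:3
WORKDIR /worker
COPY . {worker_name}
RUN test ! -e {worker_name}/requirements.txt || pip install --no-cache-dir -r {worker_name}/requirements.txt
ENTRYPOINT {command}"""

# Hypothetical worker values, mirroring the new Splunk analyzer Dockerfile
print(TEMPLATE.format(worker_name="Splunk", command="Splunk/splunk.py"))
```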