Universal Information Extraction Data Preprocessing

train_data = './datasets/duuie'                # raw DuUIE competition data, one sub-folder per task
output_folder = './datasets/duuie_pre'         # destination for the preprocessed copy
ignore_datasets = ["DUEE", "DUEE_FIN_LITE"]    # event datasets that are handled separately below
schema_folder = './datasets/seen_schema'       # YAML definitions of the target schemas

# Preprocess the CCKS 2022 competition (DuUIE) data
import shutil

# shutil.copytree(train_data, output_folder)  # copy the raw data into the output folder once before running the steps below

import os

# The two relation-extraction sub-tasks whose training texts overlap and will be merged below
life_folder = os.path.join(output_folder, "DUIE_LIFE_SPO")
org_folder = os.path.join(output_folder, "DUIE_ORG_SPO")

print(life_folder, org_folder)

import json

def load_jsonlines_file(filename):
    """Load a JSON-Lines file into a list of dicts."""
    return [json.loads(line) for line in open(filename, encoding="utf8")]

life_train_instances = load_jsonlines_file(f"{life_folder}/train.json")
org_train_instances = load_jsonlines_file(f"{org_folder}/train.json")

# Peek at a few raw training instances from each sub-task
for i in range(27695, 27698):
    print(life_train_instances[i], '|', org_train_instances[i])

class RecordSchema:
    def __init__(self, type_list, role_list, type_role_dict):
        self.type_list = type_list
        self.role_list = role_list
        self.type_role_dict = type_role_dict
    def __repr__(self) -> str:
        repr_list = [f"Type: {self.type_list}\n", f"Role: {self.role_list}\n", f"Map: {self.type_role_dict}"]
        return "\n".join(repr_list)
    @staticmethod
    def get_empty_schema():
        return RecordSchema(type_list=list(), role_list=list(), type_role_dict=dict())
    @staticmethod
    def read_from_file(filename):
        lines = open(filename, encoding="utf8").readlines()
        type_list = json.loads(lines[0])        # types
        role_list = json.loads(lines[1])        # roles
        type_role_dict = json.loads(lines[2])   # type -> roles mapping
        return RecordSchema(type_list, role_list, type_role_dict)
    def write_to_file(self, filename):
        with open(filename, "w", encoding="utf8") as output:
            # json.dumps serializes a Python object to a JSON string;
            # ensure_ascii=False keeps non-ASCII (e.g. Chinese) characters unescaped
            output.write(json.dumps(self.type_list, ensure_ascii=False) + "\n")
            output.write(json.dumps(self.role_list, ensure_ascii=False) + "\n")
            output.write(json.dumps(self.type_role_dict, ensure_ascii=False) + "\n")

RecordSchema.read_from_file(f"{life_folder}/record.schema")
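
For reference, a record.schema file is just three lines of JSON: the type list, the role list, and the type-to-role map. A minimal round-trip sketch (the type and role names below are invented, not taken from the DuUIE data; it writes a throw-away file in the working directory):

toy_schema = RecordSchema(
    type_list=["Person", "Organization"],          # invented types
    role_list=["founder"],                         # invented role
    type_role_dict={"Organization": ["founder"]},
)
toy_schema.write_to_file("toy_record.schema")
print(RecordSchema.read_from_file("toy_record.schema"))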

# Relation (role) label sets of the two sub-tasks; used below to split the merged instances back apart
life_relation = RecordSchema.read_from_file(f"{life_folder}/record.schema").role_list

org_relation = RecordSchema.read_from_file(f"{org_folder}/record.schema").role_list

from collections import defaultdict

instance_dict = defaultdict(list)

# Group all instances from both sub-tasks by their text
for instance in life_train_instances + org_train_instances:
    instance_dict[instance["text"]] += [instance]

# Instances whose text is annotated in both sub-tasks (note: these nested comprehensions are quadratic)
a = [i for i in life_train_instances for j in org_train_instances if i['text'] == j['text']]

b = [i for i in org_train_instances for j in a if i['text'] == j['text']]

for i in range(3):
    print(a[i]['relation'], '|', b[i]['relation'])
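
The two nested comprehensions above are quadratic in the dataset sizes. A set intersection is a faster sketch that yields the same overlapping texts (equivalent up to ordering and duplicate counting):

life_texts = {x["text"] for x in life_train_instances}
org_texts = {x["text"] for x in org_train_instances}
shared_texts = life_texts & org_texts   # texts annotated in both sub-tasks

a_fast = [x for x in life_train_instances if x["text"] in shared_texts]
b_fast = [x for x in org_train_instances if x["text"] in shared_texts]
print(len(shared_texts), len(a_fast), len(b_fast))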

dict_1 = {1: 2, 3: 4}
for i in dict_1:  # iterating a dict yields its keys()
    print(i)

from typing import Tuple, List, Dict

def merge_instance(instance_list):
    def all_equal(_x):  # check that every element equals the first
        for __x in _x:
            if __x != _x[0]:
                return False
        return True
    def entity_key(_x):
        return (tuple(_x["offset"]), _x["type"])
    def relation_key(_x):
        return (
            tuple(_x["type"]),
            tuple(_x["args"][0]["offset"]),
            _x["args"][0]["type"],
            tuple(_x["args"][1]["offset"]),
            _x["args"][1]["type"],
        )

    def event_key(_x):
        return (tuple(_x["offset"]), _x["type"])
    assert all_equal([x["text"] for x in instance_list])
    element_dict = {
        "entity": dict(),
        "relation": dict(),
        "event": dict(),
    }
    instance_id_list = list()
    for x in instance_list:
        instance_id_list += [x["id"]]
        for entity in x.get("entity", list()):
            element_dict["entity"][entity_key(entity)] = entity
        for relation in x.get("relation", list()):
            element_dict["relation"][relation_key(relation)] = relation
        for event in x.get("event", list()):
            element_dict["event"][event_key(event)] = event

    return {
        "id": "-".join(instance_id_list),
        "text": instance_list[0]["text"],
        "tokens": instance_list[0]["tokens"],
        "entity": list(element_dict["entity"].values()),
        "relation": list(element_dict["relation"].values()),
        "event": list(element_dict["event"].values()),
    }
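
A quick sanity check of merge_instance on two invented instances that share the same text (every id, offset, and label below is made up for illustration):

toy_a = {
    "id": "a1", "text": "张三创办了X公司", "tokens": list("张三创办了X公司"),
    "relation": [{"type": "创始人",
                  "args": [{"offset": [5, 6, 7], "type": "机构", "text": "X公司"},
                           {"offset": [0, 1], "type": "人物", "text": "张三"}]}],
}
toy_b = dict(toy_a, id="b7")        # same text, same relation, different id
merged = merge_instance([toy_a, toy_b])
print(merged["id"])                 # a1-b7
print(len(merged["relation"]))      # 1 -- the duplicated relation collapses onto a single key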

# Merge all annotations that share the same text into a single instance
for text in instance_dict:
    instance_dict[text] = merge_instance(instance_dict[text])

# Spot-check a couple of merged instances
for i in range(800, 802):
    print(list(instance_dict.values())[i]['relation'])

import copy

# Write the merged instances back to the LIFE file, keeping only the LIFE relations
with open(f"{life_folder}/train.json", "w", encoding="utf8") as output:
    for instance in instance_dict.values():
        new_instance = copy.deepcopy(instance)
        new_instance["relation"] = list(filter(lambda x: x["type"] in life_relation, instance["relation"]))
        output.write(json.dumps(new_instance, ensure_ascii=False) + "\n")

# Same for the ORG file: keep only the ORG relations
with open(f"{org_folder}/train.json", "w", encoding="utf8") as output:
    for instance in instance_dict.values():
        new_instance = copy.deepcopy(instance)
        new_instance["relation"] = list(filter(lambda x: x["type"] in org_relation, instance["relation"]))
        output.write(json.dumps(new_instance, ensure_ascii=False) + "\n")

# Reload to confirm both rewritten files now contain the same merged instances
a_instances = load_jsonlines_file(f"{life_folder}/train.json")
b_instances = load_jsonlines_file(f"{org_folder}/train.json")

print(len(a_instances), len(b_instances))

import yaml

def load_definition_schema_file(filename):
    """Load a YAML schema-definition file."""
    return yaml.load(open(filename, encoding="utf8"), Loader=yaml.FullLoader)

# Load the 体育竞赛 (sports competition) schema and explore it
aa = load_definition_schema_file(os.path.join(schema_folder, '体育竞赛.yaml'))

# Collect every argument role that appears under any event type
mm = list()
for i in aa['事件'].values():
    mm += i["参数"]
mm = list(set(mm))

# All event types defined in this schema
[x for x in aa['事件']]

# Argument roles of the 退役 (retirement) event type
aa['事件']['退役']["参数"].keys()

# Quick reminder: dict.items() yields (key, value) pairs
aaa = {1: 2, 3: 4}
for k, v in aaa.items():
    print(k, v)

def dump_schema(output_folder, schema_dict):
    """Write each (type_list, role_list, type_role_dict) triple in `schema_dict` as a three-line `<name>.schema` file."""
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for schema_name, schema in schema_dict.items():
        schema_file = f"{output_folder}/{schema_name}.schema"
        with open(schema_file, "w", encoding="utf8") as output:
            for element in schema:
                output.write(json.dumps(element, ensure_ascii=False) + "\n")

def dump_event_schema(event_map, output_folder):
    """Build entity/relation/event/record schema files from an event-type -> {"参数": roles} mapping."""
    role_list = list()
    for roles in event_map.values():
        role_list += roles["参数"]
    role_list = list(set(role_list))
    type_list = list(event_map.keys())
    type_role_map = {event_type: list(event_map[event_type]["参数"].keys()) for event_type in event_map}
    dump_schema(
        output_folder=output_folder,
        schema_dict={
            "entity": [[], [], {}],
            "relation": [[], [], {}],
            "event": [type_list, role_list, type_role_map],
            "record": [type_list, role_list, type_role_map],
        },
    )
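
dump_event_schema expects a mapping from event type to a dict whose 参数 entry holds that type's argument roles, mirroring the YAML files loaded above. A toy call with an invented event type and roles, written to a throw-away folder:

toy_event_map = {"Hiring": {"参数": {"employer": {}, "position": {}}}}   # invented names
dump_event_schema(toy_event_map, "toy_schema_out")
print(RecordSchema.read_from_file("toy_schema_out/event.schema"))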

def filter_event_in_instance(instances, required_event_types):
    """Filter the events in each instance, keeping only mentions whose type is in `required_event_types`."""
    new_instances = list()
    for instance in instances:
        new_instance = copy.deepcopy(instance)
        new_instance["event"] = list(filter(lambda x: x["type"] in required_event_types, new_instance["event"]))
        new_instances += [new_instance]
    return new_instances

def dump_instances(instances, output_filename):
    with open(output_filename, "w", encoding="utf8") as output:
        for instance in instances:
            output.write(json.dumps(instance, ensure_ascii=False) + "\n")

def filter_event(data_folder, event_types, output_folder):
    """Keep only events of the given `event_types` from `data_folder` and save the result to `output_folder`."""
    dump_event_schema(event_types, output_folder)
    for split in ["train", "val"]:
        filename = os.path.join(data_folder, f"{split}.json")
        instances = [json.loads(line.strip()) for line in open(filename, encoding="utf8")]
        new_instances = filter_event_in_instance(instances, required_event_types=event_types)
        dump_instances(new_instances, os.path.join(output_folder, f"{split}.json"))

# Preprocess the event data: keep only the 灾害意外 (disaster/accident) and 体育竞赛 (sports competition) event annotations
for schema_name in ["灾害意外", "体育竞赛"]:
    print(f"Building {schema_name} dataset ...")
    duee_folder = os.path.join(output_folder, "DUEE")
    schema_file = os.path.join(schema_folder, f"{schema_name}.yaml")
    output_folder2 = os.path.join(output_folder, schema_name)
    schema = load_definition_schema_file(schema_file)
    filter_event(
        duee_folder,
        schema["事件"],
        output_folder2,
    )

ty_instances = load_jsonlines_file(f"{output_folder}/体育竞赛/train.json")
zh_instances = load_jsonlines_file(f"{output_folder}/灾害意外/train.json")

print(len(ty_instances),len(zh_instances))

for i in range(11508,11608):
    print(ty_instances[i],'|',zh_instances[i])

# Load the 金融信息 (financial information) schema and explore it
bb = load_definition_schema_file(os.path.join(schema_folder, "金融信息.yaml"))

for i in bb['事件'].keys():
    print(i)

# Argument roles of the 中标 (bid winning) event type
mm = list()
mm += bb['事件']['中标']["参数"]
mm = list(set(mm))

bb["事件"]['中标']["参数"].keys()

for schema_name in ["金融信息"]:
    print(f"Building {schema_name} dataset ...")
    duee_fin_folder = os.path.join(output_folder, "DUEE_FIN_LITE")
    schema_file = os.path.join(schema_folder, f"{schema_name}.yaml")
    output_folder2 = os.path.join(output_folder, schema_name)
    schema = load_definition_schema_file(schema_file)
    # Split multi-type event extraction into several single-type extraction tasks, one per event type
    for event_type in schema["事件"]:
        filter_event(
            duee_fin_folder,
            {event_type: schema["事件"][event_type]},
            output_folder2 + "_" + event_type,
        )

vv = load_jsonlines_file(f"{output_folder}/DUEE_FIN_LITE/train.json")

# Each single-event-type folder keeps every document, with only that event type's mentions annotated
zb_instances = load_jsonlines_file(f"{output_folder}/金融信息_中标/train.json")
zy_instances = load_jsonlines_file(f"{output_folder}/金融信息_质押/train.json")

print(len(zb_instances), len(zy_instances))

for i in range(6985,7015):
    print(zb_instances[i],'|',zy_instances[i])

def annonote_graph(
    entities: List[Dict] = [],
    relations: List[Dict] = [],
    events: List[Dict] = []):
    """Convert entity, relation, and event annotations into a spot-association (spot-asoc) graph."""
    spot_dict = dict()
    asoc_dict = defaultdict(list)
    def add_spot(spot):
        spot_key = (tuple(spot["offset"]), spot["type"])
        spot_dict[spot_key] = spot
    def add_asoc(spot, asoc, tail):
        spot_key = (tuple(spot["offset"]), spot["type"])
        asoc_dict[spot_key] += [(tuple(tail["offset"]), tail["text"], asoc)]
    for entity in entities:
        add_spot(spot=entity)
    for relation in relations:
        add_spot(spot=relation["args"][0])
        add_asoc(spot=relation["args"][0], asoc=relation["type"], tail=relation["args"][1])
    for event in events:
        add_spot(spot=event)
        for argument in event["args"]:
            add_asoc(spot=event, asoc=argument["type"], tail=argument)
    spot_asoc_instance = list()
    for spot_key in sorted(spot_dict.keys()):
        offset, label = spot_key
        if len(spot_dict[spot_key]["offset"]) == 0:
            continue
        spot_instance = {
            "span": spot_dict[spot_key]["text"],
            "label": label,
            "asoc": list(),
        }
        for tail_offset, tail_text, asoc in sorted(asoc_dict.get(spot_key, [])):
            if len(tail_offset) == 0:
                continue
            spot_instance["asoc"] += [(asoc, tail_text)]
        spot_asoc_instance += [spot_instance]
    spot_labels = set([label for _, label in spot_dict.keys()])
    asoc_labels = set()
    for _, asoc_list in asoc_dict.items():
        for _, _, asoc in asoc_list:
            asoc_labels.add(asoc)
    return spot_labels, asoc_labels, spot_asoc_instance
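
To make the output format concrete, here is annonote_graph applied to a single invented relation (offsets, labels, and spans are made up):

toy_relations = [{
    "type": "所属机构",
    "args": [
        {"offset": [0, 1], "type": "人物", "text": "李雷"},
        {"offset": [3, 4, 5], "type": "机构", "text": "某大学"},
    ],
}]
spots, asocs, graph = annonote_graph(relations=toy_relations)
print(spots)   # {'人物'}      -- spot labels
print(asocs)   # {'所属机构'}  -- association labels
print(graph)   # [{'span': '李雷', 'label': '人物', 'asoc': [('所属机构', '某大学')]}]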

def add_spot_asoc_to_single_file(filename):
    instances = [json.loads(line) for line in open(filename, encoding="utf8")]
    print(f"Add spot asoc to {filename} ...")
    with open(filename, "w", encoding="utf8") as output:
        for instance in instances:
            spots, asocs, spot_asoc_instance = annonote_graph(
                entities=instance["entity"],
                relations=instance["relation"],
                events=instance["event"],
            )
            # attach the spot-asoc graph plus the spot/asoc label sets to the instance
            instance["spot_asoc"] = spot_asoc_instance
            instance["spot"] = list(spots)
            instance["asoc"] = list(asocs)
            output.write(json.dumps(instance, ensure_ascii=False) + "\n")

ff = os.path.join(output_folder,'金融信息_企业破产',"train.json")

ff_instances = [json.loads(line) for line in open(ff, encoding="utf8")]

for i in range(1046,1050):
    print(ff_instances[i])

a, b, yyj = annonote_graph(
    entities=ff_instances[11000]["entity"],
    relations=ff_instances[11000]["relation"],
    events=ff_instances[11000]["event"],
)

data_folder = output_folder

def merge_schema(schema_list: List[RecordSchema]):
    """Union the type lists, role lists and type-role maps of several RecordSchemas into one."""
    type_set = set()
    role_set = set()
    type_role_dict = defaultdict(list)
    for schema in schema_list:
        for type_name in schema.type_list:
            type_set.add(type_name)
        for role_name in schema.role_list:
            role_set.add(role_name)
        for type_name in schema.type_role_dict:
            type_role_dict[type_name] += schema.type_role_dict[type_name]
    for type_name in type_role_dict:
        type_role_dict[type_name] = list(set(type_role_dict[type_name]))
    return RecordSchema(
        type_list=list(type_set),
        role_list=list(role_set),
        type_role_dict=type_role_dict,
    )
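
merge_schema just unions the type and role lists and merges the per-type role maps. A tiny sketch with two invented schemas (element order in the output may vary, since sets are involved):

s1 = RecordSchema(["A"], ["x"], {"A": ["x"]})
s2 = RecordSchema(["A", "B"], ["y"], {"A": ["y"], "B": ["y"]})
print(merge_schema([s1, s2]))
# roughly: Type: ['A', 'B'] / Role: ['x', 'y'] / Map: {'A': ['x', 'y'], 'B': ['y']}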

def convert_duuie_to_spotasoc(data_folder, ignore_datasets):
    schema_list = list()
    for task_folder in os.listdir(data_folder):
        if task_folder in ignore_datasets:  # skip the datasets that were handled separately above
            continue
        if not os.path.isdir(os.path.join(data_folder, task_folder)):  # skip anything that is not a task folder
            continue
        print(f"Add spot asoc to {task_folder} ...")
        # read the single-task schema
        task_schema_file = os.path.join(data_folder, task_folder, "record.schema")
        # add spot-asoc annotations to the single-task data
        add_spot_asoc_to_single_file(os.path.join(data_folder, task_folder, "train.json"))
        add_spot_asoc_to_single_file(os.path.join(data_folder, task_folder, "val.json"))
        record_schema = RecordSchema.read_from_file(task_schema_file)
        schema_list += [record_schema]
    # merge the schemas of the individual tasks into one
    multi_schema = merge_schema(schema_list)
    multi_schema.write_to_file(os.path.join(data_folder, "record.schema"))

convert_duuie_to_spotasoc(output_folder, ignore_datasets)
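
As a final sanity check (a sketch, assuming all of the steps above completed without errors), reload the merged schema and confirm that the converted instances now carry the spot/asoc fields:

merged_schema = RecordSchema.read_from_file(os.path.join(output_folder, "record.schema"))
print(len(merged_schema.type_list), len(merged_schema.role_list))

check_instances = load_jsonlines_file(os.path.join(output_folder, "DUIE_LIFE_SPO", "train.json"))
print(sorted(check_instances[0].keys()))   # should now include 'asoc', 'spot' and 'spot_asoc'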
