[pytest, playwright] Generating videos and screenshots in the Allure report


Contents

1. Modify the pytest_playwright plugin

2. conftest.py configuration

3. Modify pytest.ini

4. Run the test cases

5. Caveats


1. Modify the pytest_playwright plugin

The full content of the modified pytest_playwright.py is as follows:

# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import os
import sys
import warnings
from typing import Any, Callable, Dict, Generator, List, Optional

import pytest
from playwright.sync_api import (
    Browser,
    BrowserContext,
    BrowserType,
    Error,
    Page,
    Playwright,
    sync_playwright,
)
from slugify import slugify
import tempfile
import allure

artifacts_folder = tempfile.TemporaryDirectory(prefix="playwright-pytest-")


@pytest.fixture(scope="session", autouse=True)
def delete_output_dir(pytestconfig: Any) -> None:
    output_dir = pytestconfig.getoption("--output")
    if os.path.exists(output_dir):
        try:
            shutil.rmtree(output_dir)
        except FileNotFoundError:
            # When running in parallel, another thread may have already deleted the files
            pass


def pytest_generate_tests(metafunc: Any) -> None:
    if "browser_name" in metafunc.fixturenames:
        browsers = metafunc.config.option.browser or ["chromium"]
        metafunc.parametrize("browser_name", browsers, scope="session")


def pytest_configure(config: Any) -> None:
    config.addinivalue_line(
        "markers", "skip_browser(name): mark test to be skipped a specific browser"
    )
    config.addinivalue_line(
        "markers", "only_browser(name): mark test to run only on a specific browser"
    )


# Making test result information available in plugins
# https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Any) -> Generator[None, Any, None]:
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()

    # set a report attribute for each phase of a call, which can
    # be "setup", "call", "teardown"

    setattr(item, "rep_" + rep.when, rep)


def _get_skiplist(item: Any, values: List[str], value_name: str) -> List[str]:
    skipped_values: List[str] = []
    # Allowlist
    only_marker = item.get_closest_marker(f"only_{value_name}")
    if only_marker:
        skipped_values = values
        skipped_values.remove(only_marker.args[0])

    # Denylist
    skip_marker = item.get_closest_marker(f"skip_{value_name}")
    if skip_marker:
        skipped_values.append(skip_marker.args[0])

    return skipped_values


def pytest_runtest_setup(item: Any) -> None:
    if not hasattr(item, "callspec"):
        return
    browser_name = item.callspec.params.get("browser_name")
    if not browser_name:
        return

    skip_browsers_names = _get_skiplist(
        item, ["chromium", "firefox", "webkit"], "browser"
    )

    if browser_name in skip_browsers_names:
        pytest.skip("skipped for this browser: {}".format(browser_name))


VSCODE_PYTHON_EXTENSION_ID = "ms-python.python"


@pytest.fixture(scope="session")
def browser_type_launch_args(pytestconfig: Any) -> Dict:
    launch_options = {}
    headed_option = pytestconfig.getoption("--headed")
    if headed_option:
        launch_options["headless"] = False
    elif VSCODE_PYTHON_EXTENSION_ID in sys.argv[0] and _is_debugger_attached():
        # When the VSCode debugger is attached, then launch the browser headed by default
        launch_options["headless"] = False
    browser_channel_option = pytestconfig.getoption("--browser-channel")
    if browser_channel_option:
        launch_options["channel"] = browser_channel_option
    slowmo_option = pytestconfig.getoption("--slowmo")
    if slowmo_option:
        launch_options["slow_mo"] = slowmo_option
    return launch_options


def _is_debugger_attached() -> bool:
    pydevd = sys.modules.get("pydevd")
    if not pydevd or not hasattr(pydevd, "get_global_debugger"):
        return False
    debugger = pydevd.get_global_debugger()
    if not debugger or not hasattr(debugger, "is_attached"):
        return False
    return debugger.is_attached()


def _build_artifact_test_folder(
        pytestconfig: Any, request: pytest.FixtureRequest, folder_or_file_name: str
) -> str:
    output_dir = pytestconfig.getoption("--output")
    return os.path.join(output_dir, slugify(request.node.nodeid), folder_or_file_name)


@pytest.fixture(scope="session")
def browser_context_args(
        pytestconfig: Any,
        playwright: Playwright,
        device: Optional[str],
) -> Dict:
    context_args = {}
    if device:
        context_args.update(playwright.devices[device])
    base_url = pytestconfig.getoption("--base-url")
    if base_url:
        context_args["base_url"] = base_url

    video_option = pytestconfig.getoption("--video")
    capture_video = video_option in ["on", "retain-on-failure"]
    if capture_video:
        context_args["record_video_dir"] = artifacts_folder.name

    return context_args


@pytest.fixture(scope="session")
def playwright() -> Generator[Playwright, None, None]:
    pw = sync_playwright().start()
    yield pw
    pw.stop()


@pytest.fixture(scope="session")
def browser_type(playwright: Playwright, browser_name: str) -> BrowserType:
    return getattr(playwright, browser_name)


@pytest.fixture(scope="session")
def launch_browser(
        browser_type_launch_args: Dict,
        browser_type: BrowserType,
) -> Callable[..., Browser]:
    def launch(**kwargs: Dict) -> Browser:
        launch_options = {**browser_type_launch_args, **kwargs}
        browser = browser_type.launch(**launch_options)
        return browser

    return launch


@pytest.fixture(scope="session")
def browser(launch_browser: Callable[[], Browser]) -> Generator[Browser, None, None]:
    browser = launch_browser()
    yield browser
    browser.close()
    artifacts_folder.cleanup()


@pytest.fixture()
def context(
        browser: Browser,
        browser_context_args: Dict,
        pytestconfig: Any,
        request: pytest.FixtureRequest,
) -> Generator[BrowserContext, None, None]:
    pages: List[Page] = []
    context = browser.new_context(**browser_context_args)
    context.on("page", lambda page: pages.append(page))

    tracing_option = pytestconfig.getoption("--tracing")
    capture_trace = tracing_option in ["on", "retain-on-failure"]
    if capture_trace:
        context.tracing.start(
            name=slugify(request.node.nodeid),
            screenshots=True,
            snapshots=True,
            sources=True,
        )

    yield context

    # If request.node is missing rep_call, then some error happened during execution
    # that prevented teardown, but should still be counted as a failure
    failed = request.node.rep_call.failed if hasattr(request.node, "rep_call") else True

    if capture_trace:
        retain_trace = tracing_option == "on" or (
                failed and tracing_option == "retain-on-failure"
        )
        if retain_trace:
            trace_path = _build_artifact_test_folder(pytestconfig, request, "trace.zip")
            context.tracing.stop(path=trace_path)
        else:
            context.tracing.stop()

    screenshot_option = pytestconfig.getoption("--screenshot")
    capture_screenshot = screenshot_option == "on" or (
            failed and screenshot_option == "only-on-failure"
    )
    if capture_screenshot:
        for index, page in enumerate(pages):
            human_readable_status = "failed" if failed else "finished"
            screenshot_path = _build_artifact_test_folder(
                pytestconfig, request, f"test-{human_readable_status}-{index + 1}.png"
            )
            try:
                page.screenshot(timeout=5000, path=screenshot_path)
                # Attach the screenshot to the allure report
                allure.attach.file(
                    screenshot_path,
                    name=f"{request.node.name}-{human_readable_status}-{index + 1}",
                    attachment_type=allure.attachment_type.PNG,
                )
            except Error:
                pass

    context.close()

    video_option = pytestconfig.getoption("--video")
    preserve_video = video_option == "on" or (
            failed and video_option == "retain-on-failure"
    )
    if preserve_video:
        human_readable_status = "failed" if failed else "finished"
        for index, page in enumerate(pages):
            video = page.video
            if not video:
                continue
            try:
                video_path = video.path()
                file_name = os.path.basename(video_path)
                file_path = _build_artifact_test_folder(pytestconfig, request, file_name)
                video.save_as(path=file_path)
                # Attach the recorded video to the allure report
                allure.attach.file(
                    file_path,
                    name=f"{request.node.name}-{human_readable_status}-{index + 1}",
                    attachment_type=allure.attachment_type.WEBM,
                )
            except Error:
                # Silently skip empty videos.
                pass


@pytest.fixture
def page(context: BrowserContext) -> Generator[Page, None, None]:
    page = context.new_page()
    yield page


@pytest.fixture(scope="session")
def is_webkit(browser_name: str) -> bool:
    return browser_name == "webkit"


@pytest.fixture(scope="session")
def is_firefox(browser_name: str) -> bool:
    return browser_name == "firefox"


@pytest.fixture(scope="session")
def is_chromium(browser_name: str) -> bool:
    return browser_name == "chromium"


@pytest.fixture(scope="session")
def browser_name(pytestconfig: Any) -> Optional[str]:
    # When using unittest.TestCase it won't use pytest_generate_tests
    # For that we still try to give the user a slightly less feature-rich experience
    browser_names = pytestconfig.getoption("--browser")
    if len(browser_names) == 0:
        return "chromium"
    if len(browser_names) == 1:
        return browser_names[0]
    warnings.warn(
        "When using unittest.TestCase specifying multiple browsers is not supported"
    )
    return browser_names[0]


@pytest.fixture(scope="session")
def browser_channel(pytestconfig: Any) -> Optional[str]:
    return pytestconfig.getoption("--browser-channel")


@pytest.fixture(scope="session")
def device(pytestconfig: Any) -> Optional[str]:
    return pytestconfig.getoption("--device")


def pytest_addoption(parser: Any) -> None:
    group = parser.getgroup("playwright", "Playwright")
    group.addoption(
        "--browser",
        action="append",
        default=[],
        help="Browser engine which should be used",
        choices=["chromium", "firefox", "webkit"],
    )
    group.addoption(
        "--headed",
        action="store_true",
        default=False,
        help="Run tests in headed mode.",
    )
    group.addoption(
        "--browser-channel",
        action="store",
        default=None,
        help="Browser channel to be used.",
    )
    group.addoption(
        "--slowmo",
        default=0,
        type=int,
        help="Run tests with slow mo",
    )
    group.addoption(
        "--device", default=None, action="store", help="Device to be emulated."
    )
    group.addoption(
        "--output",
        default="test-results",
        help="Directory for artifacts produced by tests, defaults to test-results.",
    )
    group.addoption(
        "--tracing",
        default="off",
        choices=["on", "off", "retain-on-failure"],
        help="Whether to record a trace for each test.",
    )
    group.addoption(
        "--video",
        default="off",
        choices=["on", "off", "retain-on-failure"],
        help="Whether to record video for each test.",
    )
    group.addoption(
        "--screenshot",
        default="off",
        choices=["on", "off", "only-on-failure"],
        help="Whether to automatically capture a screenshot after each test.",
    )

What was changed compared with the stock plugin:

Everything related to allure is our modification: the import allure statement at the top of the file and the two allure.attach.file(...) calls inside the context fixture, which attach the per-test screenshots (PNG) and the recorded videos (WEBM) to the Allure report. Searching the file for "allure" will show every modified spot.

2. conftest.py configuration

First remove the pytest_playwright plugin that was originally installed:

In Settings, locate pytest_playwright in the interpreter's installed packages list and click the minus sign to uninstall it (or run pip uninstall pytest-playwright).
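
The original article illustrated the conftest.py configuration with a screenshot that is not reproduced here. A minimal sketch of one way to wire the modified plugin in, assuming the pytest_playwright.py from section 1 is saved next to conftest.py in the project root (this layout is an assumption):

# conftest.py - minimal sketch; assumes the modified pytest_playwright.py
# from section 1 sits in the project root next to this file.
# Registering it here replaces the uninstalled pytest-playwright package and
# makes its fixtures (playwright, browser, context, page) and command-line
# options available to the tests.
pytest_plugins = ["pytest_playwright"]

Alternatively, the body of the modified plugin can simply be pasted into conftest.py itself; either way pytest ends up with the allure-aware fixtures.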

3. Modify pytest.ini

Add the following options so that traces, screenshots, and videos are captured for failing tests:

--tracing=retain-on-failure
--screenshot=only-on-failure
--video=retain-on-failure
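
For reference, a sketch of what the complete pytest.ini could look like. Only the three playwright options above come from the original article; the [pytest] section layout and the allure-results directory name are assumptions (the --alluredir option requires the allure-pytest package):

[pytest]
addopts = --alluredir=allure-results
          --tracing=retain-on-failure
          --screenshot=only-on-failure
          --video=retain-on-failure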

4. Run the test cases

Now run the cases. When a case fails, the output directory (test-results by default) is created, containing the screenshots, the video recordings, and the trace files.
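
To view the attached screenshots and videos in a report, run the tests with an Allure results directory and then build the report with the Allure command line. A minimal sketch, assuming allure-pytest and the Allure CLI are installed and using the allure-results directory name from above:

pytest --alluredir=allure-results
allure serve allure-results
# or build a static report:
# allure generate allure-results -o allure-report --clean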

5. Caveats

If conftest.py additionally contains a configuration of the kind sketched below (the original article showed it as a screenshot that is not reproduced here), make sure the JSON file it references actually contains values; otherwise an error will be raised.
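
The exact snippet from the original screenshot is unknown. A common configuration that matches this caveat is overriding browser_context_args so that every context starts from a saved login state in a JSON file; the sketch below is a hypothetical reconstruction, and the file name auth_state.json as well as the override itself are assumptions:

# conftest.py (hypothetical example of the kind of configuration the caveat
# refers to): the browser context is created from a saved storage state, so
# the JSON file must exist and hold valid, non-empty content, otherwise
# creating the context raises an error.
import pytest


@pytest.fixture(scope="session")
def browser_context_args(browser_context_args):
    return {
        **browser_context_args,
        "storage_state": "auth_state.json",  # assumed file name
    }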
