HPC Game Summary


PART 1 - Fundamentals

1. File I/O

a. Binary files

mmap https://stackoverflow.com/questions/44553907/mmap-sigbus-error-and-initializing-the-file (a read sketch using mmap follows the fread/fwrite examples below)
fread / fwrite

	// read
    FILE* fi;
    if ((fi = fopen("input.bin", "rb"))) {
        fread(&p, sizeof(int), 1, fi);                     // header: p
        fread(&n, sizeof(int), 1, fi);                     // header: n
        int *a = (int *)malloc((n + 1) * sizeof(int));
        fread(a, sizeof(int), n, fi);                      // payload: n ints
        fclose(fi);
        free(a);
    }
  // write
    FILE* fo;
    if ((fo = fopen("output.bin", "wb"))) {
        fwrite(&x, sizeof(int), 1, fo);
        fwrite(a, sizeof(int), n, fo);
        fclose(fo);
    }
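
A read-only mmap version of the same binary input is sketched below (a minimal sketch assuming the same p / n / payload layout as above; this is not from the original post). Mapping the file avoids the extra copy that fread performs:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int fd = open("input.bin", O_RDONLY);
    struct stat st;
    fstat(fd, &st);                               // st.st_size = file size in bytes
    char *base = (char *)mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    // (check base != MAP_FAILED in real code)
    int p = *(int *)base;                         // first int: p
    int n = *(int *)(base + sizeof(int));         // second int: n
    int *a = (int *)(base + 2 * sizeof(int));     // the n payload ints, read in place
    /* ... use p, n, a ... */
    munmap(base, st.st_size);
    close(fd);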

b. Text files

// read
	FILE *f1;
	f1 = fopen("input.txt", "r");
	fscanf(f1, "%d", &xi);                          // number of values
	float *input = (float *)malloc((xi + 1) * sizeof(float));
	for (int i = 0; i < xi; i++) {
		fscanf(f1, "%f", &input[i]);
	}
	fclose(f1);
	free(input);
// write
    FILE *out;
    out = fopen("output.dat", "w");
    fprintf(out, "%.12g\n", answer);
    fclose(out);

c. Reading arguments from the command line

int N = atoi(argv[1]);  // first command-line argument

unsigned int N2 = strtoul(argv[2], NULL, 10); // second argument, parsed as unsigned
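
A minimal self-contained sketch of the same idea (the argument names are just for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char *argv[]) {
        if (argc < 3) {                                   // the program expects two arguments
            fprintf(stderr, "usage: %s <N> <N2>\n", argv[0]);
            return 1;
        }
        int N = atoi(argv[1]);                            // first argument
        unsigned int N2 = strtoul(argv[2], NULL, 10);     // second argument
        printf("N=%d N2=%u\n", N, N2);
        return 0;
    }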

2. Using sbatch

a. Writing the scripts

  • CPU compile & run (MPI program cpi.c as an example)
# compile.sh
#!/bin/bash
module load mpi/2021.8.0
mpicc cpi.c -lm -o cpi

# job.sh
#!/bin/bash
module load mpi/2021.8.0
export I_MPI_PIN_RESPECT_CPUSET=0
# mpirun ./cpi
I_MPI_OFI_PROVIDER=tcp mpirun -genv I_MPI_FABRICS=shm:ofi -iface eno2 ./cpi

# run.sh
#!/bin/bash
sbatch --cpus-per-task=1 --nodes=5 --ntasks-per-node=2 --partition=compute --qos=normal --output=output.txt ./job.sh

Compile: ./compile.sh
Submit the job: ./run.sh

  • GPU compile & run (CUDA as an example)
# compile.sh
#!/bin/bash

sbatch -p GPU --gres=gpu:1 --gpus=1 -t 00:00:30 -o output.txt -e error.txt compile-job.sh

# run.sh
#!/bin/bash

sbatch -p GPU --gres=gpu:1 --gpus=1 -t 00:20:00 -o output.txt -e error.txt run-job.sh

compile-job.sh and run-job.sh still need to be written (they contain the actual compile command, e.g. nvcc, and the program launch, respectively).

Compile: ./compile.sh
Submit the job: ./run.sh

b. Job management

  • Check job status:
squeue
  • Cancel a job:
scancel XXXX   (job ID)

P.S.
srun runs an executable directly.
sbatch submits a batch script for execution.

3. Data generator

#!/usr/bin/php
<?php

if ($argc != 4) {
    die("USAGE: {$argv[0]} <OUTPUT_PATH> <P> <N_RANGE>\n");
}

function i32_to_bytes(int $n): array
{
    $rslt = [];
    for ($i = 0; $i < 4; ++$i) {
        $rslt[] = $n & 255;
        $n >>= 8;
    }
    return $rslt;
}

function bytes_to_string(array $n): string
{
    $a = array_map(fn (int $num) => chr($num), $n);
    return join('', $a);
}

function i32_to_string(int $n): string
{
    return bytes_to_string(i32_to_bytes($n));
}

$f = fopen($argv[1], "w");

$p = $argv[2];
$n = (1 << ((int) trim($argv[3])));
$n += $n + rand(0, $n / 2);
$part = 1;
$n = floor($n / $part) * $part;
echo "p={$p}, n={$n}" . PHP_EOL;

fwrite($f, i32_to_string($p));
fwrite($f, i32_to_string($n));
for ($i = 0; $i < ($n / $part); ++$i) {
    $m = i32_to_string(rand(0, 1 << 30));
    fwrite($f, $m);
}

fclose($f);
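
For example (assuming the script is saved as generator.php and made executable), `./generator.php input.bin 7 20` writes p = 7, then an n of roughly 2·2^20 plus a random slack, and finally n random 32-bit integers, matching the binary reading code shown in section 1.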

4. Attention (common pitfalls)

Notes for high-precision problems:
Be careful how divisions are written: `1 / N` is integer division and truncates, and a single-precision literal such as `1.0f / N` computes in float and loses digits. (The literal `1.0` is a double in C/C++, so `double _N = 1.0 / N;` is already exact to double precision.) Writing the constant as a double variable, or casting explicitly, makes the intent unambiguous:

double dif = 1.0;
double _N = dif/N;
//or
double _N = (double)1.0/N;
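
A quick illustration of the two real pitfalls (plain C, assuming N is an int):

double a = 1 / N;      // integer division: for N > 1 this is 0.0
double b = 1.0f / N;   // computed in single precision: only ~7 significant digits
double c = 1.0 / N;    // computed in double precision: full accuracy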

Changing the order of operations can also cause precision loss.

Allocate large arrays with malloc (on the heap); large local arrays can overflow the stack.

Remember to assign initial values to variables.

Using OpenMP inside a #define: https://www.thinbug.com/q/56717411
A #pragma directive cannot appear inside a #define, but inside a macro definition you can use the _Pragma operator instead, e.g. _Pragma("omp parallel for"). A complete example follows the sketch below.

#define resize2d_bilinear_kernel(typename) \
_Pragma("omp parallel for schedule(dynamic)") \
		for(){ .... }
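
A complete, compilable version of this pattern (the macro and array names here are made up for illustration; compile with -fopenmp):

#include <omp.h>
#include <stdio.h>

// Hypothetical macro: fills an array in a parallel loop. The pragma is
// injected with _Pragma because #pragma cannot appear inside a #define.
#define parallel_fill(arr, n, value)                      \
    _Pragma("omp parallel for schedule(dynamic)")         \
    for (int i_ = 0; i_ < (n); ++i_) { (arr)[i_] = (value); }

int main(void) {
    static double a[1000];
    parallel_fill(a, 1000, 3.0);
    printf("a[999] = %f\n", a[999]);
    return 0;
}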


PART 2 - Optimization Notes

1. Matrix multiplication

a. Blocking + memory-access optimization:


#define BLOCK_SIZE 64
void matMultCPU_Block(const float* a, const float* b, float* c, int n)
{
#pragma omp parallel for schedule(dynamic)
	for (int ii = 0; ii < n; ii +=BLOCK_SIZE)
		for (int jj = 0; jj < n; jj += BLOCK_SIZE)
			for (int kk = 0; kk < n; kk += BLOCK_SIZE)
				for (int i = ii; i < std::min(ii + BLOCK_SIZE,n); i++)
					for (int k = kk; k < std::min(kk+ BLOCK_SIZE,n); k++)
					{
						float s = a[i * n + k];
						for (int j = jj; j < std::min(jj + BLOCK_SIZE, n); j++)
							c[i * n + j] += s * b[k * n + j];
					}

}
// from https://zhuanlan.zhihu.com/p/371893547

b. Vectorization

void matMult_avx(double *A, double *B, double *C, size_t n)
{
	for (size_t i = 0; i < n; i += 4) {
		for (size_t j = 0; j < n; j++) {
			__m256d c0 = _mm256_load_pd(C+i+j*n); /* c0 = C[i][j] */
			for (size_t k = 0; k < n; k++) {
				c0 = _mm256_add_pd(c0,
					_mm256_mul_pd(_mm256_load_pd(A+i+k*n),
						_mm256_broadcast_sd(B+k+j*n)));
			}
			_mm256_store_pd(C+i+j*n, c0);  /* C[i][j] = c0 */;
		}
	}
}
// from https://zhuanlan.zhihu.com/p/76347262

c. Cache Blocking+AVX

#define UNROLL 4
#define BLOCKSIZE 32

static inline void do_block(int n, int si, int sj, int sk,
			double *A, double *B, double *C)
{
	for (int i = si; i < si + BLOCKSIZE; i += UNROLL*4) {
		for (int j = sj; j < sj + BLOCKSIZE; j++) {
			__m256d c[UNROLL];
			for (int x = 0; x < UNROLL; x++) {
				c[x] = _mm256_load_pd(C+i+x*4+j*n);
			}
			for (int k = sk; k < sk + BLOCKSIZE; k++) {
				__m256d b = _mm256_broadcast_sd(B+k+j*n);
				for (int x = 0; x < UNROLL; x++) {
					c[x] = _mm256_add_pd(c[x],
						_mm256_mul_pd(
							_mm256_load_pd(A+n*k+x*4+i), b));
				}
			}

			for (int x = 0; x < UNROLL; x++) {
				_mm256_store_pd(C+i+x*4+j*n, c[x]);
			}
		}
	}
}

void dgemm_avx_unroll_blk_omp(size_t n, double *A, double *B, double *C)
{
#pragma omp parallel for
	for (int sj = 0; sj < n; sj += BLOCKSIZE) {
		for (int si = 0; si < n; si += BLOCKSIZE) {
			for (int sk = 0; sk < n; sk += BLOCKSIZE) {
				do_block(n, si, sj, sk, A, B, C);
			}
		}
	}
}
//from https://zhuanlan.zhihu.com/p/76347262

d. Algorithmic optimization

  • Coppersmith Winograd algorithm

Introduction: Coppersmith–Winograd algorithm

Time complexity: O(n^2.375477). (Note: the seven-multiplication recurrence listed below is the Winograd variant of Strassen's algorithm, which runs in O(n^2.807); the O(n^2.376) Coppersmith–Winograd bound comes from a much more involved construction.)

Algorithm core:

// Algorithm core (one level of recursion; each entry is a sub-matrix block)
 * matA M*K
 * matB K*N
 * matC M*N
 * matC = matA * matB
 * S1 = A21 + A22     T1 = B12 - B11
 * S2 = S1 - A11      T2 = B22 - T1
 * S3 = A11 - A21     T3 = B22 - B12
 * S4 = A12 - S2      T4 = T2 - B21
 * M1 = A11 * B11     U1 = M1 + M2
 * M2 = A12 * B21     U2 = M1 + M6
 * M3 = S4 * B22      U3 = U2 + M7
 * M4 = A22 * T4      U4 = U2 + M5
 * M5 = S1 * T1       U5 = U4 + M3
 * M6 = S2 * T2       U6 = U3 - M4
 * M7 = S3 * T3       U7 = U3 + M5
 * C11 = U1
 * C12 = U5
 * C21 = U6
 * C22 = U7

Code: https://github.com/YYYYYW/Matrix-Multiplication (a minimal 2x2 instantiation of the recurrence is sketched below)
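
Not from the original post: a minimal scalar sketch of one level of this recurrence for plain 2x2 double matrices, stored row-major as {11, 12, 21, 22}. In the real algorithm each entry is itself a sub-matrix and the seven products M1..M7 recurse:

// One Strassen-Winograd step on 2x2 matrices (row-major {A11, A12, A21, A22}).
void winograd_2x2(const double A[4], const double B[4], double C[4]) {
    double S1 = A[2] + A[3], T1 = B[1] - B[0];
    double S2 = S1 - A[0],   T2 = B[3] - T1;
    double S3 = A[0] - A[2], T3 = B[3] - B[1];
    double S4 = A[1] - S2,   T4 = T2 - B[2];

    double M1 = A[0] * B[0];   // the seven multiplications
    double M2 = A[1] * B[2];
    double M3 = S4 * B[3];
    double M4 = A[3] * T4;
    double M5 = S1 * T1;
    double M6 = S2 * T2;
    double M7 = S3 * T3;

    double U1 = M1 + M2;
    double U2 = M1 + M6;
    double U3 = U2 + M7;
    double U4 = U2 + M5;
    double U5 = U4 + M3;
    double U6 = U3 - M4;
    double U7 = U3 + M5;

    C[0] = U1;  // C11
    C[1] = U5;  // C12
    C[2] = U6;  // C21
    C[3] = U7;  // C22
}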

2. Parallel sorting

a. Radix Sort

  • Diverting LSD radix sort :https://axelle.me/2022/04/19/diverting-lsd-sort/

Idea 1: radix sort first, then sort within the buckets.
Walking through an example:

let mut array = vec![97, 57, 17, 2, 87, 56, 33, 30, 40, 21, 27, 71];
// In binary it is: [01100001, 00111001, 00010001, 00000010, 01010111, 00111000, 
//                   00100001, 00011110, 00101000, 00010101, 00011011, 01000111]

Let's choose a radix of 2 and sort only the two leftmost levels in LSD fashion. The first pass outputs:

assert_eq!(array, vec![2, 71, 17, 87, 30, 21, 27, 97, 33, 40, 57, 56]);
//                  [00000010, 01000111, 00010001, 01010111, 00011110, 00010101, 
//                  |__________________||_______________________________________
//                        **00****                         **01****
//                   00011011, 01100001, 00100001, 00101000, 00111001, 00111000]
//                  _________||____________________________||__________________|
//                                     **10****                 **11****

And the second pass outputs:

assert_eq!(array, vec![2, 17, 30, 21, 27, 33, 40, 57, 56, 71, 87, 97]);
//                  [00000010, 00010001, 00011110, 00010101, 00011011, 00100001, 
//                  |___________________________________________________________
//                                            00******
//                   00101000, 00111001, 00111000, 01000111, 01010111, 01100001]
//                  _____________________________||____________________________|
//                                                          01******

With this example, the second pass leaves only 2 buckets, 00 and 01.
For the first bucket 00, the remaining work is just a few swaps:

let mut array = [2, 17, 30, 21, 27, 33, 40, 57, 56]
// swap 1       [2, 17, 21, 30, 27, 33, 40, 57, 56]
// swap 2       [2, 17, 21, 27, 30, 33, 40, 57, 56]
// swap 3       [2, 17, 21, 27, 30, 33, 40, 56, 57]

end.

  • Parallel Radix Sort(OpenMP):https://github.com/iwiwi/parallel-radix-sort
    use parallel_radix_sort.h

  • Voracious Radix Sort (Rust):https://github.com/lakwet/voracious_sort
    cargo add voracious_radix_sort

b. QuickSort

  • Parallel quicksort (recursive divide and conquer):
#include <omp.h>

int Partition(data_t* data, int start, int end)   // partition around a pivot, return the pivot index
{
    data_t temp = data[start];   // use the first element as the pivot
    while (start < end) {
        while (start < end && data[end] >= temp) end--;     // from the right, find an element smaller than the pivot
        data[start] = data[end];
        while (start < end && data[start] <= temp) start++; // from the left, find an element larger than the pivot
        data[end] = data[start];
    }
    data[start] = temp;   // put the pivot into its final position
    return start;
}

void quickSort(data_t* data, int start, int end)  // parallel quicksort
{
    if (start < end) {
        int pos = Partition(data, start, end);
        #pragma omp parallel sections    // open a parallel region with two sections
        {
            #pragma omp section          // sort the left part
            quickSort(data, start, pos - 1);
            #pragma omp section          // sort the right part
            quickSort(data, pos + 1, end);
        }
    }
}

quickSort(a, 0, n - 1); // in main

P.S. Parallelizing quicksort with OpenMP sections does not work well, since the sections construct limits each level to two threads; an MPI version works better. A task-based OpenMP sketch is given below.
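
As an alternative to sections, OpenMP tasks let the recursion generate parallelism at every level. A minimal sketch (not from the original post; the cutoff of 10000 is arbitrary, and Partition is the function defined above):

#include <omp.h>
#include <algorithm>

// Task-based parallel quicksort sketch; falls back to serial std::sort on small ranges.
void quickSortTask(data_t* data, int start, int end) {
    if (end - start < 10000) {                 // cutoff: avoid spawning tiny tasks
        std::sort(data + start, data + end + 1);
        return;
    }
    int pos = Partition(data, start, end);
    #pragma omp task
    quickSortTask(data, start, pos - 1);
    #pragma omp task
    quickSortTask(data, pos + 1, end);
    #pragma omp taskwait
}

// in main:
// #pragma omp parallel
// #pragma omp single
// quickSortTask(a, 0, n - 1);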

c. Standard-library options

  • GNU parallel stl
#include <vector>
#include <parallel/algorithm>

int main()
{
  std::vector<int> v(100);
  // ...
  // Explicitly force a call to parallel sort.
  __gnu_parallel::sort(v.begin(), v.end());
  return 0;
}


https://gcc.gnu.org/onlinedocs/libstdc++/manual/parallel_mode_using.html

https://www.modernescpp.com/index.php/parallel-algorithms-of-the-stl-with-gcc

  • Intel TBB

Simplified Development for Parallel Applications


https://github.com/oneapi-src/oneTBB
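
A minimal usage sketch with TBB's parallel_sort (a sketch assuming oneTBB is installed; link with -ltbb):

#include <tbb/parallel_sort.h>
#include <vector>

int main()
{
  std::vector<int> v(1000000);
  // ... fill v ...
  tbb::parallel_sort(v.begin(), v.end());
  return 0;
}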

  • The C++17 standard library: std::sort with an execution policy (a sketch follows)
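
A minimal sketch of the C++17 parallel algorithms; with libstdc++ these dispatch to TBB, so link with -ltbb:

#include <algorithm>
#include <execution>
#include <vector>

int main()
{
  std::vector<int> v(1000000);
  // ... fill v ...
  std::sort(std::execution::par, v.begin(), v.end());
  return 0;
}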

3. Computing π to high precision

a. Improved power-series method (Machin-like formula)

High accuracy: with n = 20000 (even smaller works) it easily reaches 15 significant digits.

#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>
#include<math.h>

double f(double x) {   // inline
	double y = 2 * x + 1;
	double z = pow(-1, x);
	double h1 = 4.0;
	double h2 = 5.0;
	double h3 = 239.0;
	return h1 * z / y*(h1 / pow(h2, y) - 1 / pow(h3, y));
}

int main(int argc, char* argv[])
{
	int myid, numprocs, namelen;
	double pi, sum, x, *temp;
	long long n;
	char processor_name[MPI_MAX_PROCESSOR_NAME];
	char* pi_norm = "3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938";
	
	MPI_Init(&argc, &argv);        // starts MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &myid);  // get current process id
	MPI_Comm_size(MPI_COMM_WORLD, &numprocs);      // get number of processes
	MPI_Get_processor_name(processor_name, &namelen);
	n = 20000;

	if (myid == 0) {
		temp = (double*)malloc(sizeof(double)*numprocs);
	}
	MPI_Bcast(&n, 1, MPI_LONG_LONG, 0, MPI_COMM_WORLD); // broadcast n to every rank
	sum = 0.0, pi = 0.0;
	for (long long i = myid; i <= n; i += numprocs) {
		sum += f(i);
	}
	MPI_Gather(&sum, 1, MPI_DOUBLE, temp, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);  // gather each rank's partial sum on rank 0
	if (myid == 0) {
		for (int i = 0; i < numprocs; i++) {
			pi += temp[i];
		}
		FILE *out;
    	out = fopen("output.txt","w");
    	fprintf(out,"%.15g\n",pi);
    	fclose(out);
		free(temp);
	}
	MPI_Finalize();
	return 0;
}
  • Five ways to compute π in parallel with MPICH2: https://github.com/lang22/MPI-PI

4. 2-D convolution (AVX)

a. Conv2D-AVX512

float *input  = (float *)aligned_alloc(64, N*sizeof(float));   // input of size xi x yi
float *kernel = (float *)aligned_alloc(64, M*sizeof(float));   // kernel of size xk x yk
float *ans    = (float *)aligned_alloc(64, N*sizeof(float));   // output of size (xi-xk+1) x ya, with ya = yi-yk+1

	// assumes yk is a multiple of 16; otherwise the last iteration needs a masked load
	for (int i = 0; i < xi - xk + 1; i++) {
		for (int j = 0; j < yi - yk + 1; j++) {
			//float temp = 0.0;
			__m512 tmp = _mm512_setzero_ps();
			for (int m = 0; m < xk; m++) {
				for (int n = 0; n < yk; n += 16) {
					tmp = _mm512_add_ps(tmp, _mm512_mul_ps(_mm512_loadu_ps(&kernel[m*yk+n]), _mm512_loadu_ps(&input[(i+m)*yi+j+n])));
					//temp += kernel[m*yk+n] * input[(i+m)*yi+j+n];
				}
			}
			// horizontal sum of the 16 lanes (alternatively: _mm512_reduce_add_ps(tmp))
			ans[i*(ya)+j] = tmp[0]+tmp[1]+tmp[2]+tmp[3]+tmp[4]+tmp[5]+tmp[6]+tmp[7]+tmp[8]+tmp[9]+tmp[10]+tmp[11]+tmp[12]+tmp[13]+tmp[14]+tmp[15];
			//ans[i*(ya)+j] = temp;
		}
	}

------ Supplementary notes ------

b. Memory alignment

  • How aligned allocation works

void* aligned_malloc(size_t required_bytes, size_t alignment)
{
    // over-allocate: alignment-1 bytes of slack plus room to stash the original pointer
    size_t offset = alignment - 1 + sizeof(void*);
    void* p1 = (void*)malloc(required_bytes + offset);
    if (p1 == NULL)
        return NULL;
    // round up past the stored-pointer slot to the next multiple of alignment
    void** p2 = (void**)( ( (size_t)p1 + offset ) & ~(alignment - 1) );
    p2[-1] = p1;   // remember the original pointer just before the aligned block
    return p2;
}

void aligned_free(void *p2)
{
    void* p1 = ((void**)p2)[-1];   // recover the original pointer
    free(p1);
}
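
Usage looks like ordinary malloc/free; alignment must be a power of two for the mask trick above to work. For example, for 64-byte (AVX-512) alignment:

float *buf = (float *)aligned_malloc(1024 * sizeof(float), 64);
// ... buf can now be used with aligned loads such as _mm512_load_ps ...
aligned_free(buf);
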
  • Data Alignment to Assist Vectorization (Intel)

c. SIMD

  • 玩转SIMD指令编程 :https://zhuanlan.zhihu.com/p/591900754
  • AVX512:Intrinsics for Intel® Advanced Vector Extensions 512 (Intel® AVX-512) Instructions

d. Xsimd

  • https://github.com/xtensor-stack/xsimd

xsimd provides a unified means for using these features for library authors. Namely, it enables manipulation of batches of numbers with the same arithmetic operators as for single values. It also provides accelerated implementation of common mathematical functions operating on batches.

  • https://xsimd.readthedocs.io/en/latest/

5. CUDA (C)

a. Basic pattern

// host-side skeleton
	 malloc(...);                                   // allocate host buffers

	 cudaMalloc((void **)&GPU, n * sizeof(...));    // allocate device buffers
	 cudaMemcpy(GPU, CPU, size, cudaMemcpyHostToDevice);
	 dim3 block(...);
	 dim3 grid(...);
	 f<<<grid, block>>>(...);                       // kernel launch
	 cudaDeviceSynchronize();                       // (cudaThreadSynchronize is deprecated)
	 cudaMemcpy(CPU, GPU, size, cudaMemcpyDeviceToHost);
	 cudaFree(GPU);
 
 // kernel
 __global__ void f(...){
 		// GPU CODE
 }

b. Parallelizing loop code

  • CPU code
// cpu
for(int i=0;i<n;i++){
	for(int j=0;j<m;j++){
		// code
	}
}

  • GPU code

1-D mapping

__global__ void fGPU(){
  	int index = blockIdx.x * blockDim.x + threadIdx.x;
    int i = index / m;   // m is the trip count of the inner loop
    int j = index % m;
    // code (add an if (index < n * m) guard when n*m is not a multiple of the block size)
}


// main
const int dimx = 128;
fGPU<<<(n * m + dimx - 1) / dimx, dimx>>>();

2-D mapping

__global__ void fGPU(){
  	int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < n && j < m) {
        // code
    }
}

	int dimx = 16;
    int dimy = 16;
    dim3 block(dimx, dimy);
    dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y);
    fGPU<<<grid, block>>>();

c. Optimizing atomic operations

__global__ void histogram_kernel_v1(unsigned char *arry, unsigned int *bins){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // no boundary check needed here because ARRSZ is a power of 2 and a multiple of the block size
    atomicAdd(&bins[arry[tid]], 1u);
}

void compute(){
    const int block_size = 256;
    histogram_kernel_v1<<<ARRSZ / block_size, block_size>>>(arr_gpu, bin_gpu);
    assert(cudaSuccess == cudaDeviceSynchronize());
}

  • Using shared memory
__global__ void histogram_kernel_v3_5(unsigned char *arry, unsigned int *bins){
    __shared__ unsigned int bins_shared[256];   // per-block histogram in shared memory
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    bins_shared[threadIdx.x] = 0;    // block size is assumed to be exactly 256
    __syncthreads();

    atomicAdd(&bins_shared[arry[tid]], 1u);     // shared-memory atomics are much cheaper
    __syncthreads();

    atomicAdd(&bins[threadIdx.x], bins_shared[threadIdx.x]);   // one global atomic per bin per block
}

void compute(){
    const int block_size = 256;   // do not change this
    histogram_kernel_v3_5<<<ARRSZ / block_size, block_size>>>(arr_gpu, bin_gpu);
    assert(cudaSuccess == cudaDeviceSynchronize());
}

d. The cuFFT library

The cuFFT library provides an optimized, CUDA-based implementation of the fast Fourier transform (FFT). Note that cuFFT transforms are unnormalized, and the buffers passed to cufftExecZ2Z must be device pointers.
https://docs.nvidia.com/cuda/cufft/

// 3D FFT
#include <cufft.h>
#include <complex>

void my_fft(int c, int r, std::complex<double> *in, std::complex<double> *out) {   // in/out are device pointers; r is unused by this plan
    cufftHandle plan;
    cufftPlan3d(&plan, c, c, c, CUFFT_Z2Z);
    cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex*>(in),
                 reinterpret_cast<cufftDoubleComplex*>(out),
                 CUFFT_INVERSE);
    cufftDestroy(plan);
}

nvcc -lcufft cufft.cu -o cufft

References

slurm作业管理系统怎么用?
矩阵乘法的并行优化(3):共享内存多核CPU优化
矩阵乘法优化过程(DGEMM)
Data Alignment to Assist Vectorization
玩转SIMD指令编程
Intrinsics for Intel® Advanced Vector Extensions 512 (Intel® AVX-512) Instructions

Links

mmap:https://stackoverflow.com/questions/44553907/mmap-sigbus-error-and-initializing-the-file
#define: https://www.thinbug.com/q/56717411
Coppersmith Winograd algorithm:https://github.com/YYYYYW/Matrix-Multiplication
Diverting LSD radix sort :https://axelle.me/2022/04/19/diverting-lsd-sort/
Parallel Radix Sort(OpenMP):https://github.com/iwiwi/parallel-radix-sort
Voracious Radix Sort (Rust):https://github.com/lakwet/voracious_sort
GNU parallel stl:https://gcc.gnu.org/onlinedocs/libstdc++/manual/parallel_mode_using.html
https://www.modernescpp.com/index.php/parallel-algorithms-of-the-stl-with-gcc
Intel TBB:https://github.com/oneapi-src/oneTBB
Parallel π: https://github.com/lang22/MPI-PI
Xsimd: https://github.com/xtensor-stack/xsimd
cuFFT:https://docs.nvidia.com/cuda/cufft/
