I tuned this set of parameters myself and it works fairly well, so I'm noting it down here.

import numpy as np
import cv2

debug = False  # turn this on when debugging


def generate_distance(slice_content, bg_content):
    """
    :param slice_content: binary content of the slider (puzzle piece) image
    :param bg_content: binary content of the background image
    :return: distance
    :rtype: Integer
    """
    if debug:
        with open('slice.png', 'wb') as f:
            f.write(slice_content)
        with open('bg.png', 'wb') as f:
            f.write(bg_content)

    slice_image = np.asarray(bytearray(slice_content), dtype=np.uint8)
    slice_image = cv2.imdecode(slice_image, 1)
    slice_image = cv2.Canny(slice_image, 22, 22)  # if the match is off, tune these thresholds gradually

    bg_image = np.asarray(bytearray(bg_content), dtype=np.uint8)
    bg_image = cv2.imdecode(bg_image, 1)
    bg_image = cv2.pyrMeanShiftFiltering(bg_image, 20, 11)  # if the match is off, tune these parameters gradually
    bg_image = cv2.Canny(bg_image, 20, 20)  # if the match is off, tune these thresholds gradually

    bg_image = cv2.cvtColor(bg_image, cv2.COLOR_GRAY2RGB)
    slice_image = cv2.cvtColor(slice_image, cv2.COLOR_GRAY2RGB)

    result = cv2.matchTemplate(bg_image, slice_image, cv2.TM_CCOEFF_NORMED)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    if debug:
        th, tw = slice_image.shape[:2]
        tl = max_loc                    # top-left corner of the best match
        br = (tl[0] + tw, tl[1] + th)   # bottom-right corner of the best match
        cv2.rectangle(bg_image, tl, br, (0, 0, 255), 2)  # draw the match rectangle
        cv2.imwrite('out.jpg', bg_image)  # save locally for inspection

    return max_loc[0]
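
A minimal usage sketch, assuming the two images are downloaded with requests; the URLs and variable names below are placeholders, not part of the original code:

import requests

slice_content = requests.get('https://example.com/slice.png').content  # placeholder slider image URL
bg_content = requests.get('https://example.com/bg.png').content        # placeholder background image URL
distance = generate_distance(slice_content, bg_content)
print(distance)  # horizontal offset (in pixels) the slider needs to move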

Another tip: cache the results for images that were recognized successfully; the same background images come up again often enough that the cache gets reused.
My approach:

import redis
import hashlib

R = redis.Redis(host='127.0.0.1', port=6379, db=0, decode_responses=True)


def md5(s):
    m = hashlib.md5()
    m.update(s.encode())
    return m.hexdigest()


# 'platform_name' is a placeholder prefix for your cache key
if not R.exists(f'platform_name_{md5(str(bg_content))}'):
    # code omitted... (process the image and work out the result)
    R.set(f'platform_name_{md5(str(bg_content))}', value)  # value is the recognition result, bg_content is the binary content of the background image
else:
    value = R.get(f'platform_name_{md5(str(bg_content))}')
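
A sketch of how the cache and the recognition function above could be tied together; cached_distance is a hypothetical helper name and the key prefix is still a placeholder:

def cached_distance(bg_content, slice_content):
    # hypothetical wrapper, not part of the original code
    key = f'platform_name_{md5(str(bg_content))}'  # placeholder key prefix
    cached = R.get(key)
    if cached is not None:
        return int(cached)  # decode_responses=True, so Redis returns a str
    distance = generate_distance(slice_content, bg_content)
    R.set(key, distance)
    return distance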


To round things off, here is a version found online that also works fairly well:

import cv2
import numpy as np
import requests


def generate_distance(slice_url, bg_url):
    """
    :param bg_url: URL of the background image
    :param slice_url: URL of the slider image
    :return: distance
    :rtype: Integer
    """
    slice_image = np.asarray(bytearray(requests.get(slice_url).content), dtype=np.uint8)
    slice_image = cv2.imdecode(slice_image, 1)
    slice_image = cv2.Canny(slice_image, 255, 255)

    bg_image = np.asarray(bytearray(requests.get(bg_url).content), dtype=np.uint8)
    bg_image = cv2.imdecode(bg_image, 1)
    bg_image = cv2.pyrMeanShiftFiltering(bg_image, 5, 50)
    bg_image = cv2.Canny(bg_image, 255, 255)

    result = cv2.matchTemplate(bg_image, slice_image, cv2.TM_CCOEFF_NORMED)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

    return max_loc[0]
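
Usage is the same as before, except this version takes URLs directly; the URLs below are placeholders:

distance = generate_distance(
    'https://example.com/slice.png',  # placeholder slider image URL
    'https://example.com/bg.png',     # placeholder background image URL
)
print(distance)  # horizontal offset (in pixels) the slider needs to move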