admin
2023-06-01 08c79956d76fe359737918b1b0b5d6c2aa8c8a47
Browser interface optimization
8 files changed
6 files added
624 lines changed

Changed files:
WebView2Loader.dll (binary)
cartoon/cartoon_data_import.py 137
cartoon/cartoon_spider.py 281
cartoon/mysql_data.py 89
gui_wx.py 1
kpl/gui.py 16
kpl/gui.spec 2
kpl/kpl_api.py 4
main.py 25
res/setting.conf 8
test.png (binary)
ths_util.py 12
tui_liu.spec 44
win32_util.py 5
WebView2Loader.dll
Binary files differ
cartoon/cartoon_data_import.py
New file
@@ -0,0 +1,137 @@
"""
Cartoon data import: loads the JSON files saved by cartoon_spider into MySQL.
"""
import json
import os
from cartoon.mysql_data import Mysqldb
__mysql_db = Mysqldb()
def __read_file(path):
    # Return the first line of the file, or None if it is empty
    with open(path, mode='r', encoding="utf-8") as f:
        lines = f.readlines()
        return lines[0] if lines else None
def import_recommend():
    # Parse the saved recommend data for each type id
    types = [59, 60, 61, 62, 63, 64, 65, 66]
    for t in types:
        data = __read_file(f"E:\\动漫\\文网文素材\\recommend\\{t}.json")
        data = json.loads(data)
        for d in data:
            __mysql_db.execute(
                f"insert into ss_book_recommend_books(book_id,recommend_type_id,create_time) values({d['book_id']},{t},now())")
def import_book():
    root = "E:\\动漫\\文网文素材\\book_list"
    pages = os.listdir(root)
    for page in pages:
        path = "\\".join([root, page])
        print(path)
        data = __read_file(path)
        data = json.loads(data)
        for d in data:
            __mysql_db.execute(
                f"insert into ss_book(id,author,short_title,name,cover,end_state,popularity,create_time) values({d['id']},'{d['book_author']}','{d['short_title']}','{d['book_name']}','{d['book_cover']}',{d['end_state']},{d['popularity']},now())")
def import_book_detail():
    root = "E:\\动漫\\文网文素材\\book_detail"
    pages = os.listdir(root)
    for page in pages:
        path = "\\".join([root, page])
        print(path)
        data = __read_file(path)
        data = json.loads(data)
        id = page.split(".")[0]
        print(id)
        like_count = 0
        if data[7].find("万") > -1:
            like_count = int(round(float(data[7].replace('万', '')) * 10000, 0))
        else:
            like_count = int(data[7])
        collect_count = 0
        if data[8].find("万") > -1:
            collect_count = int(round(float(data[8].replace('万', '')) * 10000, 0))
        else:
            collect_count = int(data[8])
        __mysql_db.execute(
            f"update ss_book set tags='{','.join(data[2])}',update_info='{data[3]}',`desc`='{data[4]}', like_count={like_count}, collect_count={collect_count} where id={id}")
def import_book_chapter():
    root = "E:\\动漫\\文网文素材\\chap_list"
    pages = os.listdir(root)
    for page in pages:
        path = "\\".join([root, page])
        print(path)
        data = __read_file(path)
        data = json.loads(data)
        id = page.split(".")[0]
        max_date = "1990-01-01"
        vip = False
        latest_cid = None
        for d in data:
            latest_cid = d['cId']
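            # Note: item is assembled below but never inserted in this revision;
            # only the book-level summary update at the end is executed.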
            item = [str(d['cId']), str(id), f"'{d['cName']}'", f"'{d['cDate']}'", f"'{d['cImg']}'", str(d['cLike']),
                    str(d['cBS']), str(d['vF']), str(d['ft']), str(d['r']),
                    str(d['cTeenager'])]
            if int(max_date.replace("-", "")) < int(d['cDate'].replace("-", "")):
                max_date = d['cDate']
            if d['cBS']:
                vip = True
        __mysql_db.execute(
            f"update ss_book set vip = {1 if vip else 0},latest_cid={latest_cid} , update_date = '{max_date}' where id = {id}")
# Import comments
def import_comment():
    root = "E:\\动漫\\文网文素材\\comment_list"
    pages = os.listdir(root)
    for page in pages:
        path = "\\".join([root, page])
        print(path)
        data = __read_file(path)
        data = json.loads(data)
        id = page.split(".")[0]
        for d in data:
            item = [str(d['id']), str(id), f"{d['userid']}", f"'{d['content']}'", f"'{d['create_time']}'",
                    str(d['replie_count']),
                    str(d['like_count']), str(d['star_num'])]
            sql = f"select * from ss_comment where id={item[0]}"
            if not __mysql_db.select_one(sql):
                print("插入", f"insert into ss_comment values({','.join(item)})")
                __mysql_db.execute(
                    f"insert into ss_comment values({','.join(item)})")
            if not __mysql_db.select_one(f"select * from ss_comment_user where id={d['userid']}"):
                __mysql_db.execute(
                    f"insert into ss_comment_user values({d['user_avatar']['id']},'{d['user_avatar']['user_name']}','{d['user_avatar']['user_img']}',{d['user_avatar']['vipType']})")
def update_vertical_cover(id,cover):
    __mysql_db.execute(f"update ss_book set unruly='{cover}' where id={id}")
def get_vertical_cover(id):
    results = __mysql_db.select_one(f"SELECT unruly FROM ss_book where id={id}")
    if results and results[0]:
        return results[0]
    return None
def list_comment_user_avtor():
    results = __mysql_db.select_all("SELECT img FROM ss_comment_user where img like '%qiremanhua%'")
    results = [x[0] for x in results]
    return results
if __name__ == "__main__":
    print(list_comment_user_avtor())
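Note: every statement in this file splices values into SQL with f-strings, which breaks on values containing quotes and is open to injection. Since Mysqldb.execute (cartoon/mysql_data.py below) already accepts an args tuple, a parameterized variant is a drop-in change; a sketch under the same schema assumptions as import_recommend above:

def import_recommend_parameterized():
    sql = ("insert into ss_book_recommend_books(book_id, recommend_type_id, create_time) "
           "values (%s, %s, now())")
    for t in [59, 60, 61, 62, 63, 64, 65, 66]:
        data = json.loads(__read_file(f"E:\\动漫\\文网文素材\\recommend\\{t}.json"))
        for d in data:
            # pymysql substitutes and escapes the values itself
            __mysql_db.execute(sql, (d['book_id'], t))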
cartoon/cartoon_spider.py
New file
@@ -0,0 +1,281 @@
import json
import os
import random
import time
from bs4 import BeautifulSoup
from urllib.parse import urlencode
import requests
from cartoon import cartoon_data_import
class QireManHuaSpider:
    def __parse_html(self, data):
        soup = BeautifulSoup(data, "lxml")
        return soup
    # Book list
    def list_books(self, page):
        result = requests.post("https://m.qiremanhua.com/book/book_cate_ajax",
                               {"tid": "all", "vip": "all", "end": "all", "sort": 1, "page": page,
                                "random": random.randint(0, 10000)})
        return result.text
    def chaplist(self, book_id):
        result = requests.post("https://m.qiremanhua.com/book/chaplist_ajax",
                               {"book_id": book_id, "sort": 1})
        return result.text
    def commentlist(self, book_id, page):
        result = requests.post("https://m.qiremanhua.com/comment/list_comment_ajax",
                               {"bookId": book_id, "page": page, "random": random.randint(0, 10000)})
        return result.text
    def book_detail(self, book_id):
        result = requests.get(f"https://m.qiremanhua.com/book/{book_id}/")
        return result.text
    def chap_content(self, book_id, cid):
        result = requests.get(f"https://m.qiremanhua.com/book/{book_id}/{cid}/")
        return result.text
    def vertical_cover(self, key):
        result = requests.get(f"https://m.qiremanhua.com/book/search?key={key}")
        soup = self.__parse_html(result.text)
        items = soup.find("div", class_="books-rows").find_all("div", class_="item")
        for item in items:
            title = item.find("div", class_="title").text.strip()
            if title == key.strip():
                return item.find("img")["src"]
        return None
    # Home page "more recommendations" feed
    def home_recommend_more(self, id_, page):
        result = requests.post("https://m.qiremanhua.com/book/more_ajax",
                               {"id": id_, "page": page, "random": random.randint(0, 10000)})
        return result.text
    def parse_list(self, data):
        data = json.loads(data)
        try:
            if data["status"] == 1 and data["state"] == 1:
                return data["data"]
        except Exception:
            print(data)
        # Fall back to an empty list (not None) so callers can concatenate safely
        return []
    def parse_content(self, data):
        soup = self.__parse_html(data)
        sp = soup.find("div", class_="episode-detail").find_all("img")
        imgs = []
        for p in sp:
            imgs.append(p["data-original"])
        return imgs
    # Detail: (cover, title, (tag1, tag2), update info, description, author, popularity, likes, favorites)
    def parse_detail(self, data):
        soup = self.__parse_html(data)
        sp = soup.find("div", class_="book-hero").find("img")
        cover = sp["src"].strip()
        title = soup.find("div", class_="book-hero__detail").find("h1").text.strip()
        tags = []
        for t in soup.find("div", class_="book-hero__detail").find("div", class_="tags").find_all("div"):
            tags.append(t.text.strip())
        update_info = ""
        if soup.find("div", class_="book-container__head").find("div", class_="update"):
            update_info = soup.find("div", class_="book-container__head").find("div",
                                                                               class_="update").text.strip().replace(
                "更新:", "")
        desc = soup.find("div", class_="book-container__detail").text.strip()
        author = soup.find("div", class_="book-container__author").text.strip().replace("作者:", "")
        vals = soup.find("div", class_="book-container__row").find_all("div", class_="number")
        return cover, title, tags, update_info, desc, author, vals[0].text, vals[1].text, vals[2].text
def download_file(path, img):
    r = requests.get(img, headers={"Referer": "https://m.qiremanhua.com/",
                                   "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/603.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/603.1"})
    with open(path, 'wb') as f:
        f.write(r.content)
def save_file(path, text):
    with open(path, mode='w', encoding="utf-8") as f:
        f.write(text)
def read_file(path):
    with open(path, mode='r', encoding="utf-8") as f:
        lines = f.readlines()
        # Guard against empty files, mirroring __read_file in cartoon_data_import
        return lines[0] if lines else None
spider = QireManHuaSpider()
def spide_detail(book_id):
    # Fetch the book detail page
    path = f"E:\\动漫\\文网文素材\\book_detail\\{book_id}.json"
    if not os.path.exists(path):
        detail = spider.parse_detail(spider.book_detail(book_id))
        save_file(path, json.dumps(detail))
    path = f"E:\\动漫\\文网文素材\\chap_list\\{book_id}.json"
    if not os.path.exists(path):
        # Fetch the chapter list
        chaplist = spider.parse_list(spider.chaplist(book_id))
        save_file(path, json.dumps(chaplist))
    # Fetch the comments (paged, 10 per page)
    path = f"E:\\动漫\\文网文素材\\comment_list\\{book_id}.json"
    if not os.path.exists(path):
        page = 0
        comment_list = []
        while True:
            page += 1
            temp_list = spider.parse_list(spider.commentlist(book_id, page))
            comment_list += temp_list
            if len(temp_list) < 10:
                break
            else:
                time.sleep(0.3)
        save_file(path, json.dumps(comment_list))
    print("完成爬取", book_id)
# Crawl chapter images
def spide_content(book_id):
    path = f"E:\\动漫\\文网文素材\\chap_list\\{book_id}.json"
    if not os.path.exists(path):
        return
    with open(path, mode='r', encoding="utf-8") as f:
        data = f.readline()
        data = json.loads(data)
        for d in data:
            if d['cBS'] == 0:
                cid = d['cId']
                content = spider.chap_content(book_id, cid)
                content = spider.parse_content(content)
                has_download = False
                for i in range(len(content)):
                    path_ = f"E:\\动漫\\文网文素材\\content_list\\{book_id}\\{cid}\\{i}.jpg"
                    dir_ = os.path.abspath(os.path.join(path_, os.pardir))
                    if not os.path.exists(dir_):
                        os.makedirs(dir_)
                    if not os.path.exists(path_):
                        # Download the image
                        download_file(path_, content[i])
                        has_download = True
                if has_download:
                    time.sleep(1)
def __get_cover():
    for p in range(1, 100):
        path = f"E:\\动漫\\文网文素材\\book_list\\{p}.json"
        if not os.path.exists(path):
            continue
        data = read_file(path)
        data = json.loads(data)
        for d in data:
            cover = d["book_cover"]
            ps = cover.split("/")
            dir_ = f"E:\\动漫\\文网文素材\\cover\\{ps[-2]}"
            if not os.path.exists(dir_):
                os.mkdir(dir_)
            if os.path.exists(f"{dir_}\\{ps[-1]}"):
                continue
            download_file(f"{dir_}\\{ps[-1]}", d["book_cover"])
            print("横屏封面", d["id"], d["book_name"], d["book_cover"])
def __get_vertical_cover():
    for p in range(1, 100):
        path = f"E:\\动漫\\文网文素材\\book_list\\{p}.json"
        if not os.path.exists(path):
            continue
        data = read_file(path)
        data = json.loads(data)
        for d in data:
            cover = cartoon_data_import.get_vertical_cover(d["id"])
            if not cover or len(cover) < 20:
                cover = spider.vertical_cover(d['book_name'])
                if cover:
                    cartoon_data_import.update_vertical_cover(d["id"], cover)
                else:
                    print("没有搜索到:",d["id"],d['book_name'])
            if not cover:
                continue
            ps = cover.split("/")
            dir_ = f"E:\\动漫\\文网文素材\\cover\\{ps[-2]}"
            if not os.path.exists(dir_):
                os.mkdir(dir_)
            if os.path.exists(f"{dir_}\\{ps[-1]}"):
                continue
            download_file(f"{dir_}\\{ps[-1]}", cover)
            print("竖版封面", d["id"], d["book_name"], cover)
def __get_comment_user_avtor():
    avtors = cartoon_data_import.list_comment_user_avtor()
    index = 0
    for cover in avtors:
        index += 1
        if not cover:
            continue
        if cover.find("/uploads/") < 0:
            continue
        ps = cover.split("/uploads/")[1].split("/")
        dir_ = "E:\\动漫\\文网文素材\\" + "\\".join(ps[:-1])
        if not os.path.exists(dir_):
            os.makedirs(dir_)
        if os.path.exists(f"{dir_}\\{ps[-1]}"):
            continue
        download_file(f"{dir_}\\{ps[-1]}", cover)
        print("Avatar downloaded", index, cover)
def __get_home_recommend():
    ids = [59, 60, 61, 62, 63, 64, 65, 66]
    for id_ in ids:
        path = f"E:\\动漫\\文网文素材\\recommend\\{id_}.json"
        # Skip ids that have already been saved (also avoids refetching them)
        if os.path.exists(path):
            continue
        page = 1
        fdata = []
        while True:
            data = spider.home_recommend_more(id_, page)
            page += 1
            data = spider.parse_list(data)
            fdata += data
            if len(data) < 10:
                break
        # Save to file
        save_file(path, json.dumps(fdata))
if __name__ == "__main__0":
    __get_home_recommend()
if __name__ == "__main__":
    __get_comment_user_avtor()
    # count = 0
    # for p in range(1, 100):
    #     path = f"E:\\动漫\\文网文素材\\book_list\\{p}.json"
    #     if not os.path.exists(path):
    #         continue
    #     data = read_file(path)
    #     data = json.loads(data)
    #     for d in data:
    #         count += 1
    #         print("开始爬取", d["id"])
    #         # spide_detail(d["id"])
    #         spide_content(d["id"])
    # print("总数", count)
    # data = spider.chap_content("10762", "57721")
    # print(spider.parse_detail(spider.book_detail("10762")))
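Note: every request above opens a fresh connection, and only download_file sends headers. If the site starts rejecting bare requests, a shared requests.Session with retry/backoff is the usual fix; a minimal sketch (the retry counts and backoff factor are assumptions, not values from this commit):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session():
    session = requests.Session()
    # Retry transient failures with exponential backoff
    retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[429, 500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retry))
    # Reuse the Referer the image downloads already send
    session.headers.update({"Referer": "https://m.qiremanhua.com/"})
    return session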
cartoon/mysql_data.py
New file
@@ -0,0 +1,89 @@
# Requires pymysql
import logging
import pymysql
# Connection parameters as a dict
config = {
    "host": "192.168.3.122",
    "port": 3306,
    "database": "cartoon",
    "charset": "utf8",
    "user": "root",
    "passwd": "123456"
}
class Mysqldb:
    # Constructor
    def __init__(self):
        # Open the database connection
        self.conn = self.get_conn()
        # and grab a cursor from it
        self.cursor = self.get_cursor()
    # Connect to the database
    def get_conn(self):
        # **config unpacks the dict into keyword arguments
        conn = pymysql.connect(**config)
        return conn
    # Get a cursor
    def get_cursor(self):
        cursor = self.conn.cursor()
        return cursor
    # Return all rows for a query
    def select_all(self, sql):
        self.cursor.execute(sql)
        return self.cursor.fetchall()
    # Return a single row
    def select_one(self, sql):
        self.cursor.execute(sql)
        return self.cursor.fetchone()
    # Return up to num rows
    def select_many(self, sql, num):
        self.cursor.execute(sql)
        return self.cursor.fetchmany(num)
    # Inserts, updates and deletes all follow the same pattern; each needs a commit
    def execute(self, sql, args=None):
        try:
            # Execute the statement
            self.cursor.execute(sql, args)
            # Commit
            self.conn.commit()
        except Exception as e:
            print("Commit failed:\n", e)
            logging.exception(e)
            # Roll back on error
            self.conn.rollback()
    def execute_many(self, sql, args=None):
        try:
            # Execute the statement for each args tuple
            self.cursor.executemany(sql, args)
            # Commit
            self.conn.commit()
        except Exception as e:
            logging.exception(e)
            print("Commit failed:\n", e)
            # Roll back on error
            self.conn.rollback()
    # When the object is destroyed, close the cursor, then the connection
    # (opened connection-first, so closed cursor-first)
    def __del__(self):
        self.cursor.close()
        self.conn.close()
if __name__ == '__main__':
    mysqldb = Mysqldb()
    # Insert a single row
    mysqldb.execute("insert into clients(account,pwd,rule) values(%s,%s,%s)", ("test", 123456, "\"123"))
    # Insert multiple rows
    mysqldb.execute_many("insert into clients(account,pwd,rule) values(%s,%s,%s)", [("test", 123456, "\"123"),("test", 123456, "\"123")])
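Note: select_all/select_one/select_many accept only a raw SQL string, so callers such as cartoon_data_import end up formatting values in. A parameterized overload mirroring execute() would be a small addition to the Mysqldb class (a hypothetical sketch, not part of this commit):

    def select_one_params(self, sql, args=None):
        # args is passed through to pymysql, which escapes the values
        self.cursor.execute(sql, args)
        return self.cursor.fetchone()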
gui_wx.py
@@ -1297,6 +1297,7 @@
                continue
            rect = win32gui.GetWindowRect(hwnd)
            win32_util.visual_click(hwnd, (160, (rect[3] - rect[1]) // 2))
            # win32_util.visual_keyboard_F5(hwnd)
            time_space = setting.get_ths_auto_refresh_time_space()
            if time_space is None:
                time_space = 500
kpl/gui.py
@@ -94,6 +94,8 @@
        create_thread(lambda: create_circulate_task(self.__exec_fengxiang))
        time.sleep(0.5)
        create_thread(lambda: create_circulate_task(self.__exec_industry_rank))
+        time.sleep(0.5)
+        create_thread(lambda: create_circulate_task(self.__exec_jingxuan_rank))
    def __init__(self):
        self.__init_data()
@@ -107,7 +109,7 @@
        boxsier.Add(self.item_bidding[0], 0, wx.TOP, 5)
        self.items = []
        names = ["涨停采集", "炸板采集", "跌停采集", "曾跌停采集", "市场风口采集", "最强风口采集", "风向标采集", "行业涨幅"]
        names = ["涨停采集", "炸板采集", "跌停采集", "曾跌停采集", "市场风口采集", "最强风口采集", "风向标采集", "行业涨幅", "精选流入"]
        for name in names:
            self.items.append(self.__create_item(name))
            boxsier.Add(self.items[-1][0], 0, wx.TOP, 5)
@@ -159,7 +161,7 @@
    def __init_data(self):
        self.capture_status = {}
        self.keys = ["limit_up", "open_limit_up", "limit_down", "ever_limit_down", "feng_kou", "best_feng_kou",
                     "feng_xiang", "industry_rank"]
                     "feng_xiang", "industry_rank", "jingxuan_rank"]
        for key in self.keys:
            self.capture_status[key] = [False, True]
@@ -259,11 +261,19 @@
    def __exec_industry_rank(self):
        if not self.__can_capture(7):
            return
-        results = kpl_api.getIndustryRealRankingInfo()
+        results = kpl_api.getMarketIndustryRealRankingInfo()
        result = json.loads(results)
        self.logger.info(f"行业涨幅排行代码数量:{len(result['list'])}")
        self.__upload_data(self.keys[7], result)
+    def __exec_jingxuan_rank(self):
+        if not self.__can_capture(8):
+            return
+        results = kpl_api.getMarketJingXuanRealRankingInfo()
+        result = json.loads(results)
+        self.logger.info(f"精选流入排行代码数量:{len(result['list'])}")
+        self.__upload_data(self.keys[8], result)
    # 开始所有的任务
    def __start_all_task(self, event):
        for i in range(0, len(self.items)):
kpl/gui.spec
@@ -46,5 +46,5 @@
    strip=False,
    upx=True,
    upx_exclude=[],
-    name='开盘啦',
+    name='开盘啦1',
)
kpl/kpl_api.py
@@ -82,7 +82,7 @@
# 市场行情-行业
def getMarketIndustryRealRankingInfo(orderJingE_DESC=True):
    data = f"Order={ 1 if orderJingE_DESC else 0}&a=RealRankingInfo&st=80&apiv=w32&Type=5&c=ZhiShuRanking&PhoneOSNew=1&DeviceID=a38adabd-99ef-3116-8bb9-6d893c846e23&VerSion=5.8.0.2&Index=0&ZSType=4&"
    data = f"Order={ 1 if orderJingE_DESC else 0}&a=RealRankingInfo&st=20&apiv=w32&Type=5&c=ZhiShuRanking&PhoneOSNew=1&DeviceID=a38adabd-99ef-3116-8bb9-6d893c846e23&VerSion=5.8.0.2&Index=0&ZSType=4&"
    response = __base_request("https://apphq.longhuvip.com/w1/api/index.php",
                              data=data)
    if response.status_code != 200:
@@ -92,7 +92,7 @@
# 市场行情-精选
def getMarketJingXuanRealRankingInfo(orderJingE_DESC=True):
    data = f"Order={ 1 if orderJingE_DESC else 0}&a=RealRankingInfo&st=80&apiv=w32&Type=5&c=ZhiShuRanking&PhoneOSNew=1&DeviceID=a38adabd-99ef-3116-8bb9-6d893c846e23&VerSion=5.8.0.2&Index=0&ZSType=7&"
    data = f"Order={ 1 if orderJingE_DESC else 0}&a=RealRankingInfo&st=20&apiv=w32&Type=5&c=ZhiShuRanking&PhoneOSNew=1&DeviceID=a38adabd-99ef-3116-8bb9-6d893c846e23&VerSion=5.8.0.2&Index=0&ZSType=7&"
    response = __base_request("https://apphq.longhuvip.com/w1/api/index.php",
                              data=data)
    if response.status_code != 200:
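The two endpoints differ only in ZSType (4 = industry, 7 = jingxuan); this commit drops st from 80 to 20 in both, which by the surrounding usage looks like the requested list size. The consuming side in kpl/gui.py (see __exec_jingxuan_rank above) parses the response as JSON:

result = json.loads(kpl_api.getMarketJingXuanRealRankingInfo())
print(len(result["list"]))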
main.py
@@ -11,7 +11,7 @@
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings
from PyQt5.QtWidgets import QMainWindow, QApplication, QAction, QMessageBox
-from PyQt5.QtCore import pyqtSlot, QObject, pyqtSignal, QTimer, QUrl, QPoint
+from PyQt5.QtCore import Qt, pyqtSlot, QObject, pyqtSignal, QTimer, QUrl, QPoint
import gui_wx
import network_util
@@ -62,9 +62,9 @@
    def show_info(self, msg):
        QMessageBox.information(self.window, "提示", msg, QMessageBox.Yes)
-    @pyqtSlot(str, str, str, str)
-    def set_trade_info(self, code, name, trade_data, trade_record):
-        self.window.set_trade_data(code, name, trade_data, trade_record)
+    @pyqtSlot(str, str, str, str, str, str)
+    def set_trade_info(self, code, name, trade_data, trade_record, initiative_buy_codes, passive_buy_codes):
+        self.window.set_trade_data(code, name, trade_data, trade_record, initiative_buy_codes, passive_buy_codes)
    @pyqtSlot(str, str)
    def show_want_codes(self, plate, codes):
@@ -114,8 +114,9 @@
        self.webview.load(QUrl(url))
    # 设置交易数据
-    def set_trade_data(self, code, code_name, trade_data, trade_record):
-        self.webview.page().runJavaScript(f"app.set_trade_info('{code}','{code_name}','{trade_data}','{trade_record}')")
+    def set_trade_data(self, code, code_name, trade_data, trade_record, initiative_buy_codes, passive_buy_codes):
+        self.webview.page().runJavaScript(
+            f"app.set_trade_info('{code}','{code_name}','{trade_data}','{trade_record}','{initiative_buy_codes}','{passive_buy_codes}')")
    def set_kpl_data(self, data):
        self.webview.page().runJavaScript(f"fill_kpl_data('{data}')")
@@ -142,8 +143,9 @@
    def show_warning(self, msg):
        QMessageBox.warning(self, "提示", msg, QMessageBox.Yes)
-    def set_trade_data(self, code, code_name, trade_data, trade_record):
-        self.secondWindow.set_trade_data(code, code_name, trade_data, trade_record)
+    def set_trade_data(self, code, code_name, trade_data, trade_record, initiative_buy_codes, passive_buy_codes):
+        self.secondWindow.set_trade_data(code, code_name, trade_data, trade_record, initiative_buy_codes,
+                                         passive_buy_codes)
    # 设置目标代码
    def set_target_code(self, code):
@@ -251,6 +253,7 @@
        else:
            self.resize(1100, 1000)
            self.center()
+        self.setWindowFlag(Qt.WindowStaysOnTopHint, True)
        self.webview = QWebEngineView()
        self.webview.settings().setAttribute(QWebEngineSettings.JavascriptEnabled, True)
        self.__menu()
@@ -264,10 +267,12 @@
        self.setCentralWidget(self.webview)
        self.show()
        self.webview.load(QUrl("http://192.168.3.122:8848/kp/index23-05-04.html"))
        self.webview.load(QUrl("http://192.168.3.252/kp/index23-05-04.html"))
        # self.webview.load(QUrl("http://127.0.0.1:8848/kp/index23-05-04.html"))
        self.secondWindow.show()
        self.secondWindow.loadUrl("http://192.168.3.122:8848/kp/banshuping.html")
        self.secondWindow.loadUrl("http://192.168.3.252/kp/codes_list.html")
        # self.secondWindow.loadUrl("http://127.0.0.1:8848/kp/codes_list.html")
        # 绑定槽函数
        self.signal_update_code.connect(self.set_target_code)
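One caveat with the widened runJavaScript call: the six arguments are spliced into the script between single quotes, so a payload containing ' or a newline breaks it. json.dumps yields safely escaped JavaScript string literals; a sketch of the same call with escaping (an alternative, not what this commit does):

import json

def set_trade_data(self, code, code_name, trade_data, trade_record,
                   initiative_buy_codes, passive_buy_codes):
    # json.dumps escapes quotes and newlines, producing valid JS literals
    args = ",".join(json.dumps(str(a)) for a in (
        code, code_name, trade_data, trade_record,
        initiative_buy_codes, passive_buy_codes))
    self.webview.page().runJavaScript(f"app.set_trade_info({args})")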
res/setting.conf
@@ -1,10 +1,10 @@
[config]
stay_on_top = 1
window_info = [[-1711, 194, 1280, 800], [1473, 621, 320, 195]]
-xgb_window_info = [-1921, -8, 1920, 1017]
-window_watch_float_info = [810, 487, 320, 195]
-window_tick_info = [-1928, -8, 1936, 1056]
-kp_second_window_info = [-587, 154, 500, 800]
+xgb_window_info = [-759, 198, 2253, 1017]
+window_watch_float_info = [1178, 372, 320, 195]
+window_tick_info = [-1919, 1, 1918, 1038]
+kp_second_window_info = [90, 201, 694, 800]
[juejin]
strategy_id = 95a982ce-fc2d-11ec-8ff3-0a0027000010
test.png
ths_util.py
@@ -38,14 +38,24 @@
    return None
+# Get the trading window handle
+def get_trade_hwnd():
+    hwnds = win32_util.search_window("网上股票交易系统")
+    if hwnds:
+        hwnd = hwnds[0]
+        return hwnd
+    return None
def get_ths_main_content_hwnd():
-    hwnds = win32_util.search_window("看盘页面")
+    hwnds = win32_util.search_window("同花顺")
    if hwnds:
        hwnd = hwnds[0]
        hwnd = win32gui.FindWindowEx(hwnd, None, "AfxFrameOrView100s", None)
        return hwnd
    return None
# Batch click events
def betch_click(hwnd, ps, space_time=0.5):
    for p in ps:
tui_liu.spec
New file
@@ -0,0 +1,44 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(
    ['zhibo\\tui_liu.py'],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    [],
    name='tui_liu',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
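For reference, a spec like this is what PyInstaller generates; runtime_tmpdir=None with a single EXE block produces a one-file console build, rebuilt with:

pyinstaller tui_liu.spec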
win32_util.py
@@ -41,6 +41,11 @@
    win32gui.PostMessage(hwnd, win32con.WM_KEYDOWN, code, 0)
    win32gui.PostMessage(hwnd, win32con.WM_KEYUP, code, 0)
+def visual_keyboard_F5(hwnd):
+    win32gui.PostMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_F5, 0)
+    win32gui.PostMessage(hwnd, win32con.WM_KEYUP, win32con.VK_F5, 0)
def window_capture(hwnd, rect):
    w = rect[2] - rect[0]