import re, os, sys
import urllib
import urllib2
import json
import requests
import datetime, time, subprocess
import threading, shutil

proc = None
workerRtn = False
showslist = ''  # accumulated "<Channel>_<Show>_<contentId>" lines built by get_channel_shows()

def removeDir(chNl, tdDate, ytDate):
    # Delete every dated sub-directory of the channel folder except today's and yesterday's.
    for dirs in os.listdir(chNl):
        if dirs == tdDate or dirs == ytDate:
            continue
        if os.path.isdir(chNl + "/" + dirs):
            shutil.rmtree(chNl + "/" + dirs)
            print dirs

def ListFile(dirpath):
    # Return ffconcat "file '<path>'" entries for every file in dirpath.
    return [("file '" + dirpath + "/" + f + "'") for f in os.listdir(dirpath)]
    
def make_p(dir, dir1=None):
    # Create the directory (and the optional second directory) if missing.
    created = False
    if not os.path.isdir(dir):
        os.makedirs(dir, 0777)
        created = True
    if dir1 and not os.path.isdir(dir1):
        os.makedirs(dir1, 0777)
        created = True
    return created
def makefile(fpath, data):
    # Overwrite fpath with data.
    with open(fpath, "w") as text_file:
        text_file.write(data)

def makefileA(fpath, data):
    # Append data to fpath.
    with open(fpath, "a") as text_file:
        text_file.write(data)

def readfile(fpath):
    # Return the entire contents of fpath.
    with open(fpath, "r") as text_file:
        return text_file.read()

def make_request(url):
    # Fetch a URL with iOS-app style headers and return the decoded response body.
    headers = {'User-Agent':'AppleCoreMedia/1.0.0.12B411 (iPhone; U; CPU OS 8_1 like Mac OS X; en_gb)', 'X-Forwarded-For': '205.147.101.142', 'Referer': 'http://www.hotstar.com/'}
    req = urllib2.Request(url, None, headers)
    resp = urllib2.urlopen(req).read().decode("utf-8")
    return resp
    
def get_channels():
    # Walk the Hotstar catalogue tree and collect every show into the global showslist,
    # then dump it to the "showslist" file.
    data = make_request('http://account.hotstar.com/AVS/besc?action=GetCatalogueTree&appVersion=5.0.21&categoryId=564&channel=PCTV')
    html = json.loads(data)
    for result in html['resultObj']['categoryList'][0]['categoryList']:
        title = result['contentTitle'].encode('ascii', 'ignore')
        get_channel_shows('http://account.hotstar.com/AVS/besc?action=GetArrayContentList&categoryId='+str(result['categoryId'])+'&channel=PCTV', title)
    makefile("showslist", showslist)
    print readfile("showslist")
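# Each line of the "showslist" file has the form <ChannelTitle>_<ShowTitle>_<contentId>,
# e.g. (illustrative only) StarPlus_Some_Show_12345. Note that get_channels() is not
# called by the main flow at the bottom of this script; only get_seasons_ep_links is.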

def get_kdata(url, headers=None):
    # Fetch raw bytes from a URL; returns None if the request fails.
    try:
        if headers is None:
            headers = {'User-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
        req = urllib2.Request(url, None, headers)
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
        return data
    except Exception:
        return None
    
def get_channel_shows(url, title):
    # Append a "<Channel>_<Show>_<contentId>" line to the global showslist for each show in this category.
    global showslist
    data = make_request(url)
    html = json.loads(data)
    for result in html['resultObj']['contentList']:
        stitle = result['contentTitle'].encode('ascii', 'ignore')
        showslist = showslist + title + '_' + stitle + '_' + str(result['contentId']) + '\n'
    print "."
              
def get_seasons_ep_links(url):
    # For each channel in shUrl: prune old dated folders, download the first new
    # episode found for today/yesterday (then return), else rebuild its list.txt.
    for Hchnl in shUrl:
        Hshows = Hchnl.split(",")
        chNl = Hshows[0]
        make_p(chNl)        
        tdDate = datetime.datetime.utcfromtimestamp(time.time()).strftime('%Y-%m-%d')
        date = datetime.datetime.strptime(tdDate, "%Y-%m-%d")
        modified_date = date - datetime.timedelta(days=1)
        ytDate = datetime.datetime.strftime(modified_date, "%Y-%m-%d")
        removeDir(chNl,tdDate,ytDate)        
        make_p(chNl+"/"+tdDate,chNl+"/"+ytDate)
        #data = json.loads(getApi("https://api.hotstar.com/o/v1/channel/detail?id=3&avsCategoryId=745&contentId=821&offset=0&size=200&pageNo=1&perPage=200"))['body']['results']["assets"]["items"]
        for showsID in Hshows[1::]:        
            html = json.loads(getApi("https://api.hotstar.com/o/v1/tray/g/1/items?eid="+showsID+"&etid=0&tao=0&tas=1"))['body']['results']["items"]
            for result in html:
                epno = str(result['episodeNo'])
                if int(result['episodeNo'])<10:
                    epno = '0'+ epno
                fname = (result['showShortTitle']+'_S'+str(result['seasonNo'])+'E'+epno).replace(' ','_')
                epDate = datetime.datetime.utcfromtimestamp(result['broadCastDate']).strftime('%Y-%m-%d')            
                if not os.path.isfile(chNl+"/"+epDate+"/"+fname+".mp4") and (epDate == tdDate or epDate == ytDate):
                    print "Downloading: "+fname                
                    vurl = json.loads(getApi('https://api.hotstar.com/h/v1/play?contentId='+str(result['contentId'])))['body']['results']['item']['playbackUrl']
##                    tsthread = threading.Thread(target=retriveVoD1, args=(vurl, fname, result['showShortTitle']+' - S'+str(result['seasonNo'])+'E'+epno,epDate,chNl))
##                    tsthread.daemon=True
##                    tsthread.start()                                        
                    retriveVoD1(vurl, fname, result['showShortTitle']+' - S'+str(result['seasonNo'])+'E'+epno, epDate, chNl)
                    # Only one new episode is fetched per run; stop after it.
                    return
                else:
                    print fname + " already exists."
        # Wait for any leftover download threads before rebuilding the playlist.
        for thread in threading.enumerate():
            if thread.getName() == "Thread-1" or thread.getName() == "MainThread" or thread.getName() == "SockThread":
                continue
            else:
                thread.join()
        ytFiles = ["ffconcat version 1.0"]
        tdFiles = ListFile(chNl+"/"+tdDate)
        if len(tdFiles) < 3:
            ytFiles.extend(ListFile(chNl+"/"+ytDate))
        ytFiles.extend(tdFiles)
        if len(ytFiles) > 1:
            ytFiles.append("file '"+chNl+"/list.txt'")
        makefile(chNl+"/"+"/list.txt",('\n'.join(str(line) for line in ytFiles)))
        print ('\n'.join(str(line) for line in ytFiles))
        print "Done: " + chNl
    

def findquality(data, quality, url):
    # In an HLS master playlist, the "#EXT-X-STREAM-INF" line that carries the
    # RESOLUTION tag is immediately followed by that variant's URI, so return the
    # line right after the first line containing the requested quality string.
    i = 0
    lines = data.splitlines()
    for line in lines:
        if i == 1:
            break
        if quality in line:
            i = i + 1
    if 'http' not in line:
        # Relative variant path: splice it into the master playlist URL.
        url = url.replace('master.m3u8', line)
    else:
        url = line
    return url
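
# Illustrative (made-up) master playlist showing what findquality() picks for
# quality '1280x720' -- the URI line that follows the matching stream-info line:
#   #EXTM3U
#   #EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360
#   360/playlist.m3u8
#   #EXT-X-STREAM-INF:BANDWIDTH=2500000,RESOLUTION=1280x720
#   720/playlist.m3u8   <-- selected and spliced into the master.m3u8 URL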

def retriveVoD(url, fname, stitle, tdDate, chNl):
    # Older path, not called anywhere in this script: resolve the 720p variant and save
    # the request cookies for a direct ffmpeg HLS capture (the ffmpeg command below is commented out).
    try:
        global proc
        import cookielib
        headers = {'User-agent': 'AppleCoreMedia/1.0.0.12B411 (iPhone; U; CPU OS 8_1 like Mac OS X; en_gb)', 'X-Forwarded-For': '139.59.59.250'}
        cookie_jar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
        urllib2.install_opener(opener)
        req = urllib2.Request(url, None, headers)
        url = findquality(urllib2.urlopen(req).read(), '1280x720', url)
        try:
            ua = "User-Agent: AppleCoreMedia/1.0.0.12B411 (iPhone; U; CPU OS 8_1 like Mac OS X; en_gb)\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate, br\r\nAccept-Language: en-US,en;q=0.5\r\nConnection: keep-alive\r\nCookie: "
            for cookie in cookie_jar:
                ua = ua + re.findall('Cookie (.*?) ', str(cookie), re.DOTALL)[0]+"; "
            makefile("ua",ua)
        except:pass    
        print url
        makefile(chNl+"/"+tdDate+"/"+fname+".mp4","")    
        #cmd = 'ffmpeg -headers "'+readfile("ua")+'" -i "'+url+'" -i "Star_Plus.png" -filter_complex "overlay=x=W-w-50:y=50, drawtext=fontfile=/usr/share/fonts/truetype/ttf-dejavu/DejaVuSerif.ttf:text='+stitle+':fontsize=22:fontcolor=white:x=60:y=30" -hls_time 10 -hls_list_size 0 -f hls chdata/output.m3u8'
        #proc = subprocess.Popen(cmd,shell=True)
        #proc.wait()
    except:pass


def retriveVoD1(url, fname, stitle, tdDate, chNl):
    # Active download path: resolve the 720p variant playlist, fetch each .ts segment
    # on its own thread into <channel>/tempts/, concatenate the segments in playlist
    # order, then remux the joined stream to MP4 with ffmpeg.
    make_p(chNl + "/tempts")
    s = requests.Session()
    s.headers.update({'User-agent': 'AppleCoreMedia/1.0.0.12B411 (iPhone; U; CPU OS 8_1 like Mac OS X; en_gb)', 'X-Forwarded-For': '139.59.59.252'})
    r = s.get(url)
    url = findquality(r.text, '1280x720', url)
    urltag = url.split("?")[0].split("/")[-1]
    print urltag
    r = s.get(url)
    url = url.split("?")[0]
    tsdata = r.text.splitlines()
    merged = open(chNl + '/tsdata.ts', "wb")
    # Spawn one downloader thread per .ts segment listed in the variant playlist.
    for ts in tsdata:
        if ".ts" in ts:
            if "http" not in ts:
                tsurl = url.replace(urltag, ts)
            else:
                tsurl = ts
            tsthread = threading.Thread(target=tsdownloader, args=(tsurl, ts, chNl))
            tsthread.daemon = True
            tsthread.start()
    # Wait for every downloader thread to finish before merging the segments.
    for thread in threading.enumerate():
        if thread.getName() == "Thread-1" or thread.getName() == "MainThread" or thread.getName() == "SockThread":
            continue
        else:
            thread.join()
    # Concatenate the downloaded segments in playlist order into one transport stream.
    for ts in tsdata:
        if ".ts" in ts:
            if "http" not in ts:
                with open(chNl + '/tempts/' + ts, 'rb') as mergefile:
                    shutil.copyfileobj(mergefile, merged)
    merged.close()
    # Remux the joined .ts to MP4 without re-encoding.
    cmd = 'ffmpeg -i "'+chNl+'/tsdata.ts" -c copy -bsf:a aac_adtstoasc -movflags +faststart '+chNl+'/'+tdDate+'/'+fname+'.mp4'
    proc = subprocess.Popen(cmd, shell=True)
    proc.wait()
    try:shutil.rmtree(chNl+"/tempts")   
    except:pass                    

def tsdownloader(tsurl, ts, chNl):
    # Download a single .ts segment into the channel's tempts/ staging folder.
    urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    urllib.urlretrieve(tsurl, chNl + "/tempts/" + ts)
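
# Minimal alternative segment fetcher (a sketch, not wired into the script) that
# retries transient failures using requests instead of urllib.urlretrieve; the
# function name and retry count are assumptions, not part of the original flow.
def tsdownloader_requests(tsurl, ts, chNl, retries=3):
    for attempt in range(retries):
        try:
            resp = requests.get(tsurl, timeout=30)
            resp.raise_for_status()
            with open(chNl + "/tempts/" + ts, "wb") as seg:
                seg.write(resp.content)
            return True
        except Exception:
            if attempt == retries - 1:
                return False
    return False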

##    with open('temp.ts', 'rb') as mergefile:
##        shutil.copyfileobj(mergefile, merged)        
##        print tsurl

def getApi(url):
    # Call the Hotstar JSON API with browser-style headers and the hotstarauth token.
    s = requests.Session()
    s.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0', 'X-Forwarded-For': '205.147.101.141', 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate, br', 'Referer': 'https://www.hotstar.com/', 'x-country-code': 'IN', 'x-platform-code': 'TABLET', 'x-region-code': 'undefined', 'hotstarauth': gettoken(), 'x-dst-drm':'', 'x-device-info': '68a39ade-05a4-4d9c-9e45-b75e831b674c;Nexus 5;Android;6.0', 'origin': 'https://www.hotstar.com', 'Connection': 'keep-alive'})
    r = s.get(url)
    return r.text

def gettoken():
    # Hardcoded hotstarauth token (st/exp/acl/hmac fields); it stops working once the
    # 'exp' timestamp passes and has to be replaced manually.
    return 'st=1539990711~exp=1541286711~acl=/*~hmac=907300a0176c745f569036613daabe331ca89dec0217e54c1594630854f1ddb8'
    




maxResult = '3'        
shUrl = ("StarPlus,43,6,1416,631,1531#StarVijay,43,6,1416,631,1531").split("#")

get_seasons_ep_links("")
